xref: /openbmc/linux/kernel/rcu/tree_plugin.h (revision f79e4d5f)
1 /*
2  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3  * Internal non-public definitions that provide either classic
4  * or preemptible semantics.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, you can access it online at
18  * http://www.gnu.org/licenses/gpl-2.0.html.
19  *
20  * Copyright Red Hat, 2009
21  * Copyright IBM Corporation, 2009
22  *
23  * Author: Ingo Molnar <mingo@elte.hu>
24  *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25  */
26 
27 #include <linux/delay.h>
28 #include <linux/gfp.h>
29 #include <linux/oom.h>
30 #include <linux/sched/debug.h>
31 #include <linux/smpboot.h>
32 #include <linux/sched/isolation.h>
33 #include <uapi/linux/sched/types.h>
34 #include "../time/tick-internal.h"
35 
36 #ifdef CONFIG_RCU_BOOST
37 
38 #include "../locking/rtmutex_common.h"
39 
40 /*
41  * Control variables for per-CPU and per-rcu_node kthreads.  These
42  * handle all flavors of RCU.
43  */
44 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
45 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
46 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
47 DEFINE_PER_CPU(char, rcu_cpu_has_work);
48 
49 #else /* #ifdef CONFIG_RCU_BOOST */
50 
51 /*
52  * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
53  * all uses are in dead code.  Provide a definition to keep the compiler
54  * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
55  * This probably needs to be excluded from -rt builds.
56  */
57 #define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
58 #define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
59 
60 #endif /* #else #ifdef CONFIG_RCU_BOOST */
61 
62 #ifdef CONFIG_RCU_NOCB_CPU
63 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
64 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
65 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
66 
67 /*
68  * Check the RCU kernel configuration parameters and print informative
69  * messages about anything out of the ordinary.
70  */
71 static void __init rcu_bootup_announce_oddness(void)
72 {
73 	if (IS_ENABLED(CONFIG_RCU_TRACE))
74 		pr_info("\tRCU event tracing is enabled.\n");
75 	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
76 	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
77 		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
78 		       RCU_FANOUT);
79 	if (rcu_fanout_exact)
80 		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
81 	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
82 		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
83 	if (IS_ENABLED(CONFIG_PROVE_RCU))
84 		pr_info("\tRCU lockdep checking is enabled.\n");
85 	if (RCU_NUM_LVLS >= 4)
86 		pr_info("\tFour (or more) level hierarchy is enabled.\n");
87 	if (RCU_FANOUT_LEAF != 16)
88 		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
89 			RCU_FANOUT_LEAF);
90 	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
91 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
92 	if (nr_cpu_ids != NR_CPUS)
93 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
94 #ifdef CONFIG_RCU_BOOST
95 	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
96 #endif
97 	if (blimit != DEFAULT_RCU_BLIMIT)
98 		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
99 	if (qhimark != DEFAULT_RCU_QHIMARK)
100 		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
101 	if (qlowmark != DEFAULT_RCU_QLOMARK)
102 		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
103 	if (jiffies_till_first_fqs != ULONG_MAX)
104 		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
105 	if (jiffies_till_next_fqs != ULONG_MAX)
106 		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
107 	if (rcu_kick_kthreads)
108 		pr_info("\tKick kthreads if too-long grace period.\n");
109 	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
110 		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
111 	if (gp_preinit_delay)
112 		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
113 	if (gp_init_delay)
114 		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
115 	if (gp_cleanup_delay)
116 		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
117 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
118 		pr_info("\tRCU debug extended QS entry/exit.\n");
119 	rcupdate_announce_bootup_oddness();
120 }
121 
122 #ifdef CONFIG_PREEMPT_RCU
123 
124 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
125 static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
126 static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
127 
128 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
129 			       bool wake);
130 
131 /*
132  * Tell them what RCU they are running.
133  */
134 static void __init rcu_bootup_announce(void)
135 {
136 	pr_info("Preemptible hierarchical RCU implementation.\n");
137 	rcu_bootup_announce_oddness();
138 }
139 
140 /* Flags for rcu_preempt_ctxt_queue() decision table. */
141 #define RCU_GP_TASKS	0x8
142 #define RCU_EXP_TASKS	0x4
143 #define RCU_GP_BLKD	0x2
144 #define RCU_EXP_BLKD	0x1
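/*
 * These four flags are OR'ed together into the blkd_state value computed
 * in rcu_preempt_ctxt_queue() below, so the switch statement there acts
 * as a decision table over the sixteen possible combinations.
 */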
145 
146 /*
147  * Queues a task preempted within an RCU-preempt read-side critical
148  * section into the appropriate location within the ->blkd_tasks list,
149  * depending on the states of any ongoing normal and expedited grace
150  * periods.  The ->gp_tasks pointer indicates which element the normal
151  * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
152  * indicates which element the expedited grace period is waiting on (again,
153  * NULL if none).  If a grace period is waiting on a given element in the
154  * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
155  * adding a task to the tail of the list blocks any grace period that is
156  * already waiting on one of the elements.  In contrast, adding a task
157  * to the head of the list won't block any grace period that is already
158  * waiting on one of the elements.
159  *
160  * This queuing is imprecise, and can sometimes make an ongoing grace
161  * period wait for a task that is not strictly speaking blocking it.
162  * Given the choice, we needlessly block a normal grace period rather than
163  * blocking an expedited grace period.
164  *
165  * Note that an endless sequence of expedited grace periods still cannot
166  * indefinitely postpone a normal grace period.  Eventually, all of the
167  * fixed number of preempted tasks blocking the normal grace period that are
168  * not also blocking the expedited grace period will resume and complete
169  * their RCU read-side critical sections.  At that point, the ->gp_tasks
170  * pointer will equal the ->exp_tasks pointer, at which point the end of
171  * the corresponding expedited grace period will also be the end of the
172  * normal grace period.
173  */
174 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
175 	__releases(rnp->lock) /* But leaves interrupts disabled. */
176 {
177 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
178 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
179 			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
180 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
181 	struct task_struct *t = current;
182 
183 	raw_lockdep_assert_held_rcu_node(rnp);
184 	WARN_ON_ONCE(rdp->mynode != rnp);
185 	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
186 
187 	/*
188 	 * Decide where to queue the newly blocked task.  In theory,
189 	 * this could be an if-statement.  In practice, when I tried
190 	 * that, it was quite messy.
191 	 */
192 	switch (blkd_state) {
193 	case 0:
194 	case                RCU_EXP_TASKS:
195 	case                RCU_EXP_TASKS + RCU_GP_BLKD:
196 	case RCU_GP_TASKS:
197 	case RCU_GP_TASKS + RCU_EXP_TASKS:
198 
199 		/*
200 		 * Blocking neither GP, or first task blocking the normal
201 		 * GP but not blocking the already-waiting expedited GP.
202 		 * Queue at the head of the list to avoid unnecessarily
203 		 * blocking the already-waiting GPs.
204 		 */
205 		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
206 		break;
207 
208 	case                                              RCU_EXP_BLKD:
209 	case                                RCU_GP_BLKD:
210 	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
211 	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
212 	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
213 	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
214 
215 		/*
216 		 * First task arriving that blocks either GP, or first task
217 		 * arriving that blocks the expedited GP (with the normal
218 		 * GP already waiting), or a task arriving that blocks
219 		 * both GPs with both GPs already waiting.  Queue at the
220 		 * tail of the list to avoid any GP waiting on any of the
221 		 * already queued tasks that are not blocking it.
222 		 */
223 		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
224 		break;
225 
226 	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
227 	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
228 	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
229 
230 		/*
231 		 * Second or subsequent task blocking the expedited GP.
232 		 * The task either does not block the normal GP, or is the
233 		 * first task blocking the normal GP.  Queue just after
234 		 * the first task blocking the expedited GP.
235 		 */
236 		list_add(&t->rcu_node_entry, rnp->exp_tasks);
237 		break;
238 
239 	case RCU_GP_TASKS +                 RCU_GP_BLKD:
240 	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
241 
242 		/*
243 		 * Second or subsequent task blocking the normal GP.
244 		 * The task does not block the expedited GP. Queue just
245 		 * after the first task blocking the normal GP.
246 		 */
247 		list_add(&t->rcu_node_entry, rnp->gp_tasks);
248 		break;
249 
250 	default:
251 
252 		/* Yet another exercise in excessive paranoia. */
253 		WARN_ON_ONCE(1);
254 		break;
255 	}
256 
257 	/*
258 	 * We have now queued the task.  If it was the first one to
259 	 * block either grace period, update the ->gp_tasks and/or
260 	 * ->exp_tasks pointers, respectively, to reference the newly
261 	 * blocked tasks.
262 	 */
263 	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
264 		rnp->gp_tasks = &t->rcu_node_entry;
265 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
266 		rnp->exp_tasks = &t->rcu_node_entry;
267 	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
268 		     !(rnp->qsmask & rdp->grpmask));
269 	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
270 		     !(rnp->expmask & rdp->grpmask));
271 	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
272 
273 	/*
274 	 * Report the quiescent state for the expedited GP.  This expedited
275 	 * GP should not be able to end until we report, so there should be
276 	 * no need to check for a subsequent expedited GP.  (Though we are
277 	 * still in a quiescent state in any case.)
278 	 */
279 	if (blkd_state & RCU_EXP_BLKD &&
280 	    t->rcu_read_unlock_special.b.exp_need_qs) {
281 		t->rcu_read_unlock_special.b.exp_need_qs = false;
282 		rcu_report_exp_rdp(rdp->rsp, rdp, true);
283 	} else {
284 		WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
285 	}
286 }
287 
288 /*
289  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
290  * that this just means that the task currently running on the CPU is
291  * not in a quiescent state.  There might be any number of tasks blocked
292  * while in an RCU read-side critical section.
293  *
294  * As with the other rcu_*_qs() functions, callers to this function
295  * must disable preemption.
296  */
297 static void rcu_preempt_qs(void)
298 {
299 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
300 	if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
301 		trace_rcu_grace_period(TPS("rcu_preempt"),
302 				       __this_cpu_read(rcu_data_p->gpnum),
303 				       TPS("cpuqs"));
304 		__this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
305 		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
306 		current->rcu_read_unlock_special.b.need_qs = false;
307 	}
308 }
309 
310 /*
311  * We have entered the scheduler, and the current task might soon be
312  * context-switched away from.  If this task is in an RCU read-side
313  * critical section, we will no longer be able to rely on the CPU to
314  * record that fact, so we enqueue the task on the blkd_tasks list.
315  * The task will dequeue itself when it exits the outermost enclosing
316  * RCU read-side critical section.  Therefore, the current grace period
317  * cannot be permitted to complete until the blkd_tasks list entries
318  * predating the current grace period drain, in other words, until
319  * rnp->gp_tasks becomes NULL.
320  *
321  * Caller must disable interrupts.
322  */
323 static void rcu_preempt_note_context_switch(bool preempt)
324 {
325 	struct task_struct *t = current;
326 	struct rcu_data *rdp;
327 	struct rcu_node *rnp;
328 
329 	lockdep_assert_irqs_disabled();
330 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
331 	if (t->rcu_read_lock_nesting > 0 &&
332 	    !t->rcu_read_unlock_special.b.blocked) {
333 
334 		/* Possibly blocking in an RCU read-side critical section. */
335 		rdp = this_cpu_ptr(rcu_state_p->rda);
336 		rnp = rdp->mynode;
337 		raw_spin_lock_rcu_node(rnp);
338 		t->rcu_read_unlock_special.b.blocked = true;
339 		t->rcu_blocked_node = rnp;
340 
341 		/*
342 		 * Verify the CPU's sanity, trace the preemption, and
343 		 * then queue the task as required based on the states
344 		 * of any ongoing and expedited grace periods.
345 		 */
346 		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
347 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
348 		trace_rcu_preempt_task(rdp->rsp->name,
349 				       t->pid,
350 				       (rnp->qsmask & rdp->grpmask)
351 				       ? rnp->gpnum
352 				       : rnp->gpnum + 1);
353 		rcu_preempt_ctxt_queue(rnp, rdp);
354 	} else if (t->rcu_read_lock_nesting < 0 &&
355 		   t->rcu_read_unlock_special.s) {
356 
357 		/*
358 		 * Complete exit from RCU read-side critical section on
359 		 * behalf of preempted instance of __rcu_read_unlock().
360 		 */
361 		rcu_read_unlock_special(t);
362 	}
363 
364 	/*
365 	 * Either we were not in an RCU read-side critical section to
366 	 * begin with, or we have now recorded that critical section
367 	 * globally.  Either way, we can now note a quiescent state
368 	 * for this CPU.  Again, if we were in an RCU read-side critical
369 	 * section, and if that critical section was blocking the current
370 	 * grace period, then the fact that the task has been enqueued
371 	 * means that we continue to block the current grace period.
372 	 */
373 	rcu_preempt_qs();
374 }
375 
376 /*
377  * Check for preempted RCU readers blocking the current grace period
378  * for the specified rcu_node structure.  If the caller needs a reliable
379  * answer, it must hold the rcu_node's ->lock.
380  */
381 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
382 {
383 	return rnp->gp_tasks != NULL;
384 }
385 
386 /*
387  * Preemptible RCU implementation for rcu_read_lock().
388  * Just increment ->rcu_read_lock_nesting, shared state will be updated
389  * if we block.
390  */
391 void __rcu_read_lock(void)
392 {
393 	current->rcu_read_lock_nesting++;
394 	barrier();  /* critical section after entry code. */
395 }
396 EXPORT_SYMBOL_GPL(__rcu_read_lock);
397 
398 /*
399  * Preemptible RCU implementation for rcu_read_unlock().
400  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
401  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
402  * invoke rcu_read_unlock_special() to clean up after a context switch
403  * in an RCU read-side critical section and other special cases.
404  */
405 void __rcu_read_unlock(void)
406 {
407 	struct task_struct *t = current;
408 
409 	if (t->rcu_read_lock_nesting != 1) {
410 		--t->rcu_read_lock_nesting;
411 	} else {
412 		barrier();  /* critical section before exit code. */
413 		t->rcu_read_lock_nesting = INT_MIN;
414 		barrier();  /* assign before ->rcu_read_unlock_special load */
415 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
416 			rcu_read_unlock_special(t);
417 		barrier();  /* ->rcu_read_unlock_special load before assign */
418 		t->rcu_read_lock_nesting = 0;
419 	}
420 #ifdef CONFIG_PROVE_LOCKING
421 	{
422 		int rrln = READ_ONCE(t->rcu_read_lock_nesting);
423 
424 		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
425 	}
426 #endif /* #ifdef CONFIG_PROVE_LOCKING */
427 }
428 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
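/*
 * A minimal reader-side usage sketch (gp, struct foo, and
 * do_something_with() are illustrative names, not part of this file):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock();
 *
 * Under CONFIG_PREEMPT_RCU the reader may be preempted anywhere in this
 * region; the queuing in rcu_preempt_ctxt_queue() above ensures that any
 * grace period the reader is blocking waits for the matching
 * rcu_read_unlock().
 */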
429 
430 /*
431  * Advance a ->blkd_tasks-list pointer to the next entry, returning
432  * NULL instead if the pointer is at the end of the list.
433  */
434 static struct list_head *rcu_next_node_entry(struct task_struct *t,
435 					     struct rcu_node *rnp)
436 {
437 	struct list_head *np;
438 
439 	np = t->rcu_node_entry.next;
440 	if (np == &rnp->blkd_tasks)
441 		np = NULL;
442 	return np;
443 }
444 
445 /*
446  * Return true if the specified rcu_node structure has tasks that were
447  * preempted within an RCU read-side critical section.
448  */
449 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
450 {
451 	return !list_empty(&rnp->blkd_tasks);
452 }
453 
454 /*
455  * Handle special cases during rcu_read_unlock(), such as needing to
456  * notify RCU core processing or task having blocked during the RCU
457  * read-side critical section.
458  */
459 void rcu_read_unlock_special(struct task_struct *t)
460 {
461 	bool empty_exp;
462 	bool empty_norm;
463 	bool empty_exp_now;
464 	unsigned long flags;
465 	struct list_head *np;
466 	bool drop_boost_mutex = false;
467 	struct rcu_data *rdp;
468 	struct rcu_node *rnp;
469 	union rcu_special special;
470 
471 	/* NMI handlers cannot block and cannot safely manipulate state. */
472 	if (in_nmi())
473 		return;
474 
475 	local_irq_save(flags);
476 
477 	/*
478 	 * If RCU core is waiting for this CPU to exit its critical section,
479 	 * report the fact that it has exited.  Because irqs are disabled,
480 	 * t->rcu_read_unlock_special cannot change.
481 	 */
482 	special = t->rcu_read_unlock_special;
483 	if (special.b.need_qs) {
484 		rcu_preempt_qs();
485 		t->rcu_read_unlock_special.b.need_qs = false;
486 		if (!t->rcu_read_unlock_special.s) {
487 			local_irq_restore(flags);
488 			return;
489 		}
490 	}
491 
492 	/*
493 	 * Respond to a request for an expedited grace period, but only if
494 	 * we were not preempted, meaning that we were running on the same
495 	 * CPU throughout.  If we were preempted, the exp_need_qs flag
496 	 * would have been cleared at the time of the first preemption,
497 	 * and the quiescent state would be reported when we were dequeued.
498 	 */
499 	if (special.b.exp_need_qs) {
500 		WARN_ON_ONCE(special.b.blocked);
501 		t->rcu_read_unlock_special.b.exp_need_qs = false;
502 		rdp = this_cpu_ptr(rcu_state_p->rda);
503 		rcu_report_exp_rdp(rcu_state_p, rdp, true);
504 		if (!t->rcu_read_unlock_special.s) {
505 			local_irq_restore(flags);
506 			return;
507 		}
508 	}
509 
510 	/* Hardware IRQ handlers cannot block, complain if they get here. */
511 	if (in_irq() || in_serving_softirq()) {
512 		lockdep_rcu_suspicious(__FILE__, __LINE__,
513 				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
514 		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
515 			 t->rcu_read_unlock_special.s,
516 			 t->rcu_read_unlock_special.b.blocked,
517 			 t->rcu_read_unlock_special.b.exp_need_qs,
518 			 t->rcu_read_unlock_special.b.need_qs);
519 		local_irq_restore(flags);
520 		return;
521 	}
522 
523 	/* Clean up if blocked during RCU read-side critical section. */
524 	if (special.b.blocked) {
525 		t->rcu_read_unlock_special.b.blocked = false;
526 
527 		/*
528 		 * Remove this task from the list it blocked on.  The task
529 		 * now remains queued on the rcu_node corresponding to the
530 		 * CPU it first blocked on, so there is no longer any need
531 		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
532 		 */
533 		rnp = t->rcu_blocked_node;
534 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
535 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
536 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
537 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
538 		empty_exp = sync_rcu_preempt_exp_done(rnp);
539 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
540 		np = rcu_next_node_entry(t, rnp);
541 		list_del_init(&t->rcu_node_entry);
542 		t->rcu_blocked_node = NULL;
543 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
544 						rnp->gpnum, t->pid);
545 		if (&t->rcu_node_entry == rnp->gp_tasks)
546 			rnp->gp_tasks = np;
547 		if (&t->rcu_node_entry == rnp->exp_tasks)
548 			rnp->exp_tasks = np;
549 		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
550 			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
551 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
552 			if (&t->rcu_node_entry == rnp->boost_tasks)
553 				rnp->boost_tasks = np;
554 		}
555 
556 		/*
557 		 * If this was the last task on the current list, and if
558 		 * we aren't waiting on any CPUs, report the quiescent state.
559 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
560 		 * so we must take a snapshot of the expedited state.
561 		 */
562 		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
563 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
564 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
565 							 rnp->gpnum,
566 							 0, rnp->qsmask,
567 							 rnp->level,
568 							 rnp->grplo,
569 							 rnp->grphi,
570 							 !!rnp->gp_tasks);
571 			rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
572 		} else {
573 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
574 		}
575 
576 		/* Unboost if we were boosted. */
577 		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
578 			rt_mutex_futex_unlock(&rnp->boost_mtx);
579 
580 		/*
581 		 * If this was the last task on the expedited lists,
582 		 * then we need to report up the rcu_node hierarchy.
583 		 */
584 		if (!empty_exp && empty_exp_now)
585 			rcu_report_exp_rnp(rcu_state_p, rnp, true);
586 	} else {
587 		local_irq_restore(flags);
588 	}
589 }
590 
591 /*
592  * Dump detailed information for all tasks blocking the current RCU
593  * grace period on the specified rcu_node structure.
594  */
595 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
596 {
597 	unsigned long flags;
598 	struct task_struct *t;
599 
600 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
601 	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
602 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
603 		return;
604 	}
605 	t = list_entry(rnp->gp_tasks->prev,
606 		       struct task_struct, rcu_node_entry);
607 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
608 		/*
609 		 * We could be printing a lot while holding a spinlock.
610 		 * Avoid triggering hard lockup.
611 		 */
612 		touch_nmi_watchdog();
613 		sched_show_task(t);
614 	}
615 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
616 }
617 
618 /*
619  * Dump detailed information for all tasks blocking the current RCU
620  * grace period.
621  */
622 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
623 {
624 	struct rcu_node *rnp = rcu_get_root(rsp);
625 
626 	rcu_print_detail_task_stall_rnp(rnp);
627 	rcu_for_each_leaf_node(rsp, rnp)
628 		rcu_print_detail_task_stall_rnp(rnp);
629 }
630 
631 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
632 {
633 	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
634 	       rnp->level, rnp->grplo, rnp->grphi);
635 }
636 
637 static void rcu_print_task_stall_end(void)
638 {
639 	pr_cont("\n");
640 }
641 
642 /*
643  * Scan the current list of tasks blocked within RCU read-side critical
644  * sections, printing out the tid of each.
645  */
646 static int rcu_print_task_stall(struct rcu_node *rnp)
647 {
648 	struct task_struct *t;
649 	int ndetected = 0;
650 
651 	if (!rcu_preempt_blocked_readers_cgp(rnp))
652 		return 0;
653 	rcu_print_task_stall_begin(rnp);
654 	t = list_entry(rnp->gp_tasks->prev,
655 		       struct task_struct, rcu_node_entry);
656 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
657 		pr_cont(" P%d", t->pid);
658 		ndetected++;
659 	}
660 	rcu_print_task_stall_end();
661 	return ndetected;
662 }
663 
664 /*
665  * Scan the current list of tasks blocked within RCU read-side critical
666  * sections, printing out the tid of each that is blocking the current
667  * expedited grace period.
668  */
669 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
670 {
671 	struct task_struct *t;
672 	int ndetected = 0;
673 
674 	if (!rnp->exp_tasks)
675 		return 0;
676 	t = list_entry(rnp->exp_tasks->prev,
677 		       struct task_struct, rcu_node_entry);
678 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
679 		pr_cont(" P%d", t->pid);
680 		ndetected++;
681 	}
682 	return ndetected;
683 }
684 
685 /*
686  * Check that the list of blocked tasks for the newly completed grace
687  * period is in fact empty.  It is a serious bug to complete a grace
688  * period that still has RCU readers blocked!  This function must be
689  * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
690  * must be held by the caller.
691  *
692  * Also, if there are blocked tasks on the list, they automatically
693  * block the newly created grace period, so set up ->gp_tasks accordingly.
694  */
695 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
696 {
697 	struct task_struct *t;
698 
699 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
700 	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
701 	if (rcu_preempt_has_tasks(rnp)) {
702 		rnp->gp_tasks = rnp->blkd_tasks.next;
703 		t = container_of(rnp->gp_tasks, struct task_struct,
704 				 rcu_node_entry);
705 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
706 						rnp->gpnum, t->pid);
707 	}
708 	WARN_ON_ONCE(rnp->qsmask);
709 }
710 
711 /*
712  * Check for a quiescent state from the current CPU.  When a task blocks,
713  * the task is recorded in the corresponding CPU's rcu_node structure,
714  * which is checked elsewhere.
715  *
716  * Caller must disable hard irqs.
717  */
718 static void rcu_preempt_check_callbacks(void)
719 {
720 	struct task_struct *t = current;
721 
722 	if (t->rcu_read_lock_nesting == 0) {
723 		rcu_preempt_qs();
724 		return;
725 	}
726 	if (t->rcu_read_lock_nesting > 0 &&
727 	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
728 	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
729 		t->rcu_read_unlock_special.b.need_qs = true;
730 }
731 
732 /**
733  * call_rcu() - Queue an RCU callback for invocation after a grace period.
734  * @head: structure to be used for queueing the RCU updates.
735  * @func: actual callback function to be invoked after the grace period
736  *
737  * The callback function will be invoked some time after a full grace
738  * period elapses, in other words after all pre-existing RCU read-side
739  * critical sections have completed.  However, the callback function
740  * might well execute concurrently with RCU read-side critical sections
741  * that started after call_rcu() was invoked.  RCU read-side critical
742  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
743  * and may be nested.
744  *
745  * Note that all CPUs must agree that the grace period extended beyond
746  * all pre-existing RCU read-side critical sections.  On systems with more
747  * than one CPU, this means that when "func()" is invoked, each CPU is
748  * guaranteed to have executed a full memory barrier since the end of its
749  * last RCU read-side critical section whose beginning preceded the call
750  * to call_rcu().  It also means that each CPU executing an RCU read-side
751  * critical section that continues beyond the start of "func()" must have
752  * executed a memory barrier after the call_rcu() but before the beginning
753  * of that RCU read-side critical section.  Note that these guarantees
754  * include CPUs that are offline, idle, or executing in user mode, as
755  * well as CPUs that are executing in the kernel.
756  *
757  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
758  * resulting RCU callback function "func()", then both CPU A and CPU B are
759  * guaranteed to execute a full memory barrier during the time interval
760  * between the call to call_rcu() and the invocation of "func()" -- even
761  * if CPU A and CPU B are the same CPU (but again only if the system has
762  * more than one CPU).
763  */
764 void call_rcu(struct rcu_head *head, rcu_callback_t func)
765 {
766 	__call_rcu(head, func, rcu_state_p, -1, 0);
767 }
768 EXPORT_SYMBOL_GPL(call_rcu);
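/*
 * A typical call_rcu() usage sketch (struct foo, foo_reclaim(), and fp
 * are illustrative names, not part of this file):
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 * Then, after making fp unreachable to new readers:
 *
 *	call_rcu(&fp->rh, foo_reclaim);
 *
 * foo_reclaim() runs only after all readers that might still hold a
 * reference to fp have completed their read-side critical sections.
 */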
769 
770 /**
771  * synchronize_rcu - wait until a grace period has elapsed.
772  *
773  * Control will return to the caller some time after a full grace
774  * period has elapsed, in other words after all currently executing RCU
775  * read-side critical sections have completed.  Note, however, that
776  * upon return from synchronize_rcu(), the caller might well be executing
777  * concurrently with new RCU read-side critical sections that began while
778  * synchronize_rcu() was waiting.  RCU read-side critical sections are
779  * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
780  *
781  * See the description of synchronize_sched() for more detailed
782  * information on memory-ordering guarantees.  However, please note
783  * that -only- the memory-ordering guarantees apply.  For example,
784  * synchronize_rcu() is -not- guaranteed to wait on things like code
785  * protected by preempt_disable(), instead, synchronize_rcu() is -only-
786  * guaranteed to wait on RCU read-side critical sections, that is, sections
787  * of code protected by rcu_read_lock().
788  */
789 void synchronize_rcu(void)
790 {
791 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
792 			 lock_is_held(&rcu_lock_map) ||
793 			 lock_is_held(&rcu_sched_lock_map),
794 			 "Illegal synchronize_rcu() in RCU read-side critical section");
795 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
796 		return;
797 	if (rcu_gp_is_expedited())
798 		synchronize_rcu_expedited();
799 	else
800 		wait_rcu_gp(call_rcu);
801 }
802 EXPORT_SYMBOL_GPL(synchronize_rcu);
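/*
 * A typical updater-side sketch built on synchronize_rcu() (p and the
 * RCU-protected list are illustrative):
 *
 *	list_del_rcu(&p->list);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * Once synchronize_rcu() returns, no reader that could have seen p on the
 * list can still be referencing it, so freeing it is safe.
 */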
803 
804 /**
805  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
806  *
807  * Note that this primitive does not necessarily wait for an RCU grace period
808  * to complete.  For example, if there are no RCU callbacks queued anywhere
809  * in the system, then rcu_barrier() is within its rights to return
810  * immediately, without waiting for anything, much less an RCU grace period.
811  */
812 void rcu_barrier(void)
813 {
814 	_rcu_barrier(rcu_state_p);
815 }
816 EXPORT_SYMBOL_GPL(rcu_barrier);
817 
818 /*
819  * Initialize preemptible RCU's state structures.
820  */
821 static void __init __rcu_init_preempt(void)
822 {
823 	rcu_init_one(rcu_state_p);
824 }
825 
826 /*
827  * Check for a task exiting while in a preemptible-RCU read-side
828  * critical section, clean up if so.  No need to issue warnings,
829  * as debug_check_no_locks_held() already does this if lockdep
830  * is enabled.
831  */
832 void exit_rcu(void)
833 {
834 	struct task_struct *t = current;
835 
836 	if (likely(list_empty(&current->rcu_node_entry)))
837 		return;
838 	t->rcu_read_lock_nesting = 1;
839 	barrier();
840 	t->rcu_read_unlock_special.b.blocked = true;
841 	__rcu_read_unlock();
842 }
843 
844 #else /* #ifdef CONFIG_PREEMPT_RCU */
845 
846 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
847 
848 /*
849  * Tell them what RCU they are running.
850  */
851 static void __init rcu_bootup_announce(void)
852 {
853 	pr_info("Hierarchical RCU implementation.\n");
854 	rcu_bootup_announce_oddness();
855 }
856 
857 /*
858  * Because preemptible RCU does not exist, we never have to check for
859  * CPUs being in quiescent states.
860  */
861 static void rcu_preempt_note_context_switch(bool preempt)
862 {
863 }
864 
865 /*
866  * Because preemptible RCU does not exist, there are never any preempted
867  * RCU readers.
868  */
869 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
870 {
871 	return 0;
872 }
873 
874 /*
875  * Because there is no preemptible RCU, there can be no readers blocked.
876  */
877 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
878 {
879 	return false;
880 }
881 
882 /*
883  * Because preemptible RCU does not exist, we never have to check for
884  * tasks blocked within RCU read-side critical sections.
885  */
886 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
887 {
888 }
889 
890 /*
891  * Because preemptible RCU does not exist, we never have to check for
892  * tasks blocked within RCU read-side critical sections.
893  */
894 static int rcu_print_task_stall(struct rcu_node *rnp)
895 {
896 	return 0;
897 }
898 
899 /*
900  * Because preemptible RCU does not exist, we never have to check for
901  * tasks blocked within RCU read-side critical sections that are
902  * blocking the current expedited grace period.
903  */
904 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
905 {
906 	return 0;
907 }
908 
909 /*
910  * Because there is no preemptible RCU, there can be no readers blocked,
911  * so there is no need to check for blocked tasks.  Check only for
912  * bogus qsmask values.
913  */
914 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
915 {
916 	WARN_ON_ONCE(rnp->qsmask);
917 }
918 
919 /*
920  * Because preemptible RCU does not exist, it never has any callbacks
921  * to check.
922  */
923 static void rcu_preempt_check_callbacks(void)
924 {
925 }
926 
927 /*
928  * Because preemptible RCU does not exist, rcu_barrier() is just
929  * another name for rcu_barrier_sched().
930  */
931 void rcu_barrier(void)
932 {
933 	rcu_barrier_sched();
934 }
935 EXPORT_SYMBOL_GPL(rcu_barrier);
936 
937 /*
938  * Because preemptible RCU does not exist, it need not be initialized.
939  */
940 static void __init __rcu_init_preempt(void)
941 {
942 }
943 
944 /*
945  * Because preemptible RCU does not exist, tasks cannot possibly exit
946  * while in preemptible RCU read-side critical sections.
947  */
948 void exit_rcu(void)
949 {
950 }
951 
952 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
953 
954 #ifdef CONFIG_RCU_BOOST
955 
956 static void rcu_wake_cond(struct task_struct *t, int status)
957 {
958 	/*
959 	 * If the thread is yielding, only wake it when this
960 	 * is invoked from idle.
961 	 */
962 	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
963 		wake_up_process(t);
964 }
965 
966 /*
967  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
968  * or ->boost_tasks, advancing the pointer to the next task in the
969  * ->blkd_tasks list.
970  *
971  * Note that irqs must be enabled: boosting the task can block.
972  * Returns 1 if there are more tasks needing to be boosted.
973  */
974 static int rcu_boost(struct rcu_node *rnp)
975 {
976 	unsigned long flags;
977 	struct task_struct *t;
978 	struct list_head *tb;
979 
980 	if (READ_ONCE(rnp->exp_tasks) == NULL &&
981 	    READ_ONCE(rnp->boost_tasks) == NULL)
982 		return 0;  /* Nothing left to boost. */
983 
984 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
985 
986 	/*
987 	 * Recheck under the lock: all tasks in need of boosting
988 	 * might exit their RCU read-side critical sections on their own.
989 	 */
990 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
991 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
992 		return 0;
993 	}
994 
995 	/*
996 	 * Preferentially boost tasks blocking expedited grace periods.
997 	 * This cannot starve the normal grace periods because a second
998 	 * expedited grace period must boost all blocked tasks, including
999 	 * those blocking the pre-existing normal grace period.
1000 	 */
1001 	if (rnp->exp_tasks != NULL)
1002 		tb = rnp->exp_tasks;
1003 	else
1004 		tb = rnp->boost_tasks;
1005 
1006 	/*
1007 	 * We boost task t by manufacturing an rt_mutex that appears to
1008 	 * be held by task t.  We leave a pointer to that rt_mutex where
1009 	 * task t can find it, and task t will release the mutex when it
1010 	 * exits its outermost RCU read-side critical section.  Then
1011 	 * simply acquiring this artificial rt_mutex will boost task
1012 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1013 	 *
1014 	 * Note that task t must acquire rnp->lock to remove itself from
1015 	 * the ->blkd_tasks list, which it will do from exit() if from
1016 	 * nowhere else.  We therefore are guaranteed that task t will
1017 	 * stay around at least until we drop rnp->lock.  Note that
1018 	 * rnp->lock also resolves races between our priority boosting
1019 	 * and task t's exiting its outermost RCU read-side critical
1020 	 * section.
1021 	 */
1022 	t = container_of(tb, struct task_struct, rcu_node_entry);
1023 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1024 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1025 	/* Lock only for side effect: boosts task t's priority. */
1026 	rt_mutex_lock(&rnp->boost_mtx);
1027 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1028 
1029 	return READ_ONCE(rnp->exp_tasks) != NULL ||
1030 	       READ_ONCE(rnp->boost_tasks) != NULL;
1031 }
1032 
1033 /*
1034  * Priority-boosting kthread, one per leaf rcu_node.
1035  */
1036 static int rcu_boost_kthread(void *arg)
1037 {
1038 	struct rcu_node *rnp = (struct rcu_node *)arg;
1039 	int spincnt = 0;
1040 	int more2boost;
1041 
1042 	trace_rcu_utilization(TPS("Start boost kthread@init"));
1043 	for (;;) {
1044 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1045 		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1046 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1047 		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1048 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1049 		more2boost = rcu_boost(rnp);
1050 		if (more2boost)
1051 			spincnt++;
1052 		else
1053 			spincnt = 0;
1054 		if (spincnt > 10) {
1055 			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
1056 			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1057 			schedule_timeout_interruptible(2);
1058 			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1059 			spincnt = 0;
1060 		}
1061 	}
1062 	/* NOTREACHED */
1063 	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1064 	return 0;
1065 }
1066 
1067 /*
1068  * Check to see if it is time to start boosting RCU readers that are
1069  * blocking the current grace period, and, if so, tell the per-rcu_node
1070  * kthread to start boosting them.  If there is an expedited grace
1071  * period in progress, it is always time to boost.
1072  *
1073  * The caller must hold rnp->lock, which this function releases.
1074  * The ->boost_kthread_task is immortal, so we don't need to worry
1075  * about it going away.
1076  */
1077 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1078 	__releases(rnp->lock)
1079 {
1080 	struct task_struct *t;
1081 
1082 	raw_lockdep_assert_held_rcu_node(rnp);
1083 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1084 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1085 		return;
1086 	}
1087 	if (rnp->exp_tasks != NULL ||
1088 	    (rnp->gp_tasks != NULL &&
1089 	     rnp->boost_tasks == NULL &&
1090 	     rnp->qsmask == 0 &&
1091 	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1092 		if (rnp->exp_tasks == NULL)
1093 			rnp->boost_tasks = rnp->gp_tasks;
1094 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1095 		t = rnp->boost_kthread_task;
1096 		if (t)
1097 			rcu_wake_cond(t, rnp->boost_kthread_status);
1098 	} else {
1099 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1100 	}
1101 }
1102 
1103 /*
1104  * Wake up the per-CPU kthread to invoke RCU callbacks.
1105  */
1106 static void invoke_rcu_callbacks_kthread(void)
1107 {
1108 	unsigned long flags;
1109 
1110 	local_irq_save(flags);
1111 	__this_cpu_write(rcu_cpu_has_work, 1);
1112 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1113 	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
1114 		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1115 			      __this_cpu_read(rcu_cpu_kthread_status));
1116 	}
1117 	local_irq_restore(flags);
1118 }
1119 
1120 /*
1121  * Is the current CPU running the RCU-callbacks kthread?
1122  * Caller must have preemption disabled.
1123  */
1124 static bool rcu_is_callbacks_kthread(void)
1125 {
1126 	return __this_cpu_read(rcu_cpu_kthread_task) == current;
1127 }
1128 
1129 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
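/*
 * RCU_BOOST_DELAY_JIFFIES converts the millisecond-valued
 * CONFIG_RCU_BOOST_DELAY into jiffies, rounding up.  For example, with
 * HZ=250 and CONFIG_RCU_BOOST_DELAY=500, boosting starts 125 jiffies
 * (500 milliseconds) into the grace period.
 */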
1130 
1131 /*
1132  * Do priority-boost accounting for the start of a new grace period.
1133  */
1134 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1135 {
1136 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1137 }
1138 
1139 /*
1140  * Create an RCU-boost kthread for the specified node if one does not
1141  * already exist.  We only create this kthread for preemptible RCU.
1142  * Returns zero if all is well, a negated errno otherwise.
1143  */
1144 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1145 				       struct rcu_node *rnp)
1146 {
1147 	int rnp_index = rnp - &rsp->node[0];
1148 	unsigned long flags;
1149 	struct sched_param sp;
1150 	struct task_struct *t;
1151 
1152 	if (rcu_state_p != rsp)
1153 		return 0;
1154 
1155 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
1156 		return 0;
1157 
1158 	rsp->boost = 1;
1159 	if (rnp->boost_kthread_task != NULL)
1160 		return 0;
1161 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1162 			   "rcub/%d", rnp_index);
1163 	if (IS_ERR(t))
1164 		return PTR_ERR(t);
1165 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1166 	rnp->boost_kthread_task = t;
1167 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1168 	sp.sched_priority = kthread_prio;
1169 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1170 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1171 	return 0;
1172 }
1173 
1174 static void rcu_kthread_do_work(void)
1175 {
1176 	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
1177 	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
1178 	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
1179 }
1180 
1181 static void rcu_cpu_kthread_setup(unsigned int cpu)
1182 {
1183 	struct sched_param sp;
1184 
1185 	sp.sched_priority = kthread_prio;
1186 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1187 }
1188 
1189 static void rcu_cpu_kthread_park(unsigned int cpu)
1190 {
1191 	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1192 }
1193 
1194 static int rcu_cpu_kthread_should_run(unsigned int cpu)
1195 {
1196 	return __this_cpu_read(rcu_cpu_has_work);
1197 }
1198 
1199 /*
1200  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
1201  * RCU softirq used in flavors and configurations of RCU that do not
1202  * support RCU priority boosting.
1203  */
1204 static void rcu_cpu_kthread(unsigned int cpu)
1205 {
1206 	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
1207 	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
1208 	int spincnt;
1209 
1210 	for (spincnt = 0; spincnt < 10; spincnt++) {
1211 		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
1212 		local_bh_disable();
1213 		*statusp = RCU_KTHREAD_RUNNING;
1214 		this_cpu_inc(rcu_cpu_kthread_loops);
1215 		local_irq_disable();
1216 		work = *workp;
1217 		*workp = 0;
1218 		local_irq_enable();
1219 		if (work)
1220 			rcu_kthread_do_work();
1221 		local_bh_enable();
1222 		if (*workp == 0) {
1223 			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
1224 			*statusp = RCU_KTHREAD_WAITING;
1225 			return;
1226 		}
1227 	}
1228 	*statusp = RCU_KTHREAD_YIELDING;
1229 	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
1230 	schedule_timeout_interruptible(2);
1231 	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
1232 	*statusp = RCU_KTHREAD_WAITING;
1233 }
1234 
1235 /*
1236  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1237  * served by the rcu_node in question.  The CPU hotplug lock is still
1238  * held, so the value of rnp->qsmaskinit will be stable.
1239  *
1240  * We don't include outgoingcpu in the affinity set; use -1 if there is
1241  * no outgoing CPU.  If there are no CPUs left in the affinity set,
1242  * this function allows the kthread to execute on any CPU.
1243  */
1244 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1245 {
1246 	struct task_struct *t = rnp->boost_kthread_task;
1247 	unsigned long mask = rcu_rnp_online_cpus(rnp);
1248 	cpumask_var_t cm;
1249 	int cpu;
1250 
1251 	if (!t)
1252 		return;
1253 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1254 		return;
1255 	for_each_leaf_node_possible_cpu(rnp, cpu)
1256 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
1257 		    cpu != outgoingcpu)
1258 			cpumask_set_cpu(cpu, cm);
1259 	if (cpumask_weight(cm) == 0)
1260 		cpumask_setall(cm);
1261 	set_cpus_allowed_ptr(t, cm);
1262 	free_cpumask_var(cm);
1263 }
1264 
1265 static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1266 	.store			= &rcu_cpu_kthread_task,
1267 	.thread_should_run	= rcu_cpu_kthread_should_run,
1268 	.thread_fn		= rcu_cpu_kthread,
1269 	.thread_comm		= "rcuc/%u",
1270 	.setup			= rcu_cpu_kthread_setup,
1271 	.park			= rcu_cpu_kthread_park,
1272 };
1273 
1274 /*
1275  * Spawn boost kthreads -- called as soon as the scheduler is running.
1276  */
1277 static void __init rcu_spawn_boost_kthreads(void)
1278 {
1279 	struct rcu_node *rnp;
1280 	int cpu;
1281 
1282 	for_each_possible_cpu(cpu)
1283 		per_cpu(rcu_cpu_has_work, cpu) = 0;
1284 	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
1285 	rcu_for_each_leaf_node(rcu_state_p, rnp)
1286 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1287 }
1288 
1289 static void rcu_prepare_kthreads(int cpu)
1290 {
1291 	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
1292 	struct rcu_node *rnp = rdp->mynode;
1293 
1294 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1295 	if (rcu_scheduler_fully_active)
1296 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
1297 }
1298 
1299 #else /* #ifdef CONFIG_RCU_BOOST */
1300 
1301 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1302 	__releases(rnp->lock)
1303 {
1304 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1305 }
1306 
1307 static void invoke_rcu_callbacks_kthread(void)
1308 {
1309 	WARN_ON_ONCE(1);
1310 }
1311 
1312 static bool rcu_is_callbacks_kthread(void)
1313 {
1314 	return false;
1315 }
1316 
1317 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1318 {
1319 }
1320 
1321 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1322 {
1323 }
1324 
1325 static void __init rcu_spawn_boost_kthreads(void)
1326 {
1327 }
1328 
1329 static void rcu_prepare_kthreads(int cpu)
1330 {
1331 }
1332 
1333 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1334 
1335 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1336 
1337 /*
1338  * Check to see if any future RCU-related work will need to be done
1339  * by the current CPU, even if none need be done immediately, returning
1340  * 1 if so.  This function is part of the RCU implementation; it is -not-
1341  * an exported member of the RCU API.
1342  *
1343  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1344  * any flavor of RCU.
1345  */
1346 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1347 {
1348 	*nextevt = KTIME_MAX;
1349 	return rcu_cpu_has_callbacks(NULL);
1350 }
1351 
1352 /*
1353  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1354  * after it.
1355  */
1356 static void rcu_cleanup_after_idle(void)
1357 {
1358 }
1359 
1360 /*
1361  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1362  * is nothing.
1363  */
1364 static void rcu_prepare_for_idle(void)
1365 {
1366 }
1367 
1368 /*
1369  * Don't bother keeping a running count of the number of RCU callbacks
1370  * posted because CONFIG_RCU_FAST_NO_HZ=n.
1371  */
1372 static void rcu_idle_count_callbacks_posted(void)
1373 {
1374 }
1375 
1376 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1377 
1378 /*
1379  * This code is invoked when a CPU goes idle, at which point we want
1380  * to have the CPU do everything required for RCU so that it can enter
1381  * the energy-efficient dyntick-idle mode.  This is handled by a
1382  * state machine implemented by rcu_prepare_for_idle() below.
1383  *
1384  * The following two preprocessor symbols control this state machine:
1385  *
1386  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1387  *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1388  *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1389  *	benchmarkers who might otherwise be tempted to set this to a large
1390  *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1391  *	system.  And if you are -that- concerned about energy efficiency,
1392  *	just power the system down and be done with it!
1393  * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1394  *	permitted to sleep in dyntick-idle mode with only lazy RCU
1395  *	callbacks pending.  Setting this too high can OOM your system.
1396  *
1397  * The values below work well in practice.  If future workloads require
1398  * adjustment, they can be converted into kernel config parameters, though
1399  * making the state machine smarter might be a better option.
1400  */
1401 #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1402 #define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
1403 
1404 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1405 module_param(rcu_idle_gp_delay, int, 0644);
1406 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
1407 module_param(rcu_idle_lazy_gp_delay, int, 0644);
1408 
1409 /*
1410  * Try to advance callbacks for all flavors of RCU on the current CPU, but
1411  * only if it has been a while since the last time we did so.  Afterwards,
1412  * if there are any callbacks ready for immediate invocation, return true.
1413  */
1414 static bool __maybe_unused rcu_try_advance_all_cbs(void)
1415 {
1416 	bool cbs_ready = false;
1417 	struct rcu_data *rdp;
1418 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1419 	struct rcu_node *rnp;
1420 	struct rcu_state *rsp;
1421 
1422 	/* Exit early if we advanced recently. */
1423 	if (jiffies == rdtp->last_advance_all)
1424 		return false;
1425 	rdtp->last_advance_all = jiffies;
1426 
1427 	for_each_rcu_flavor(rsp) {
1428 		rdp = this_cpu_ptr(rsp->rda);
1429 		rnp = rdp->mynode;
1430 
1431 		/*
1432 		 * Don't bother checking unless a grace period has
1433 		 * completed since we last checked and there are
1434 		 * callbacks not yet ready to invoke.
1435 		 */
1436 		if ((rdp->completed != rnp->completed ||
1437 		     unlikely(READ_ONCE(rdp->gpwrap))) &&
1438 		    rcu_segcblist_pend_cbs(&rdp->cblist))
1439 			note_gp_changes(rsp, rdp);
1440 
1441 		if (rcu_segcblist_ready_cbs(&rdp->cblist))
1442 			cbs_ready = true;
1443 	}
1444 	return cbs_ready;
1445 }
1446 
1447 /*
1448  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1449  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1450  * caller to set the timeout based on whether or not there are non-lazy
1451  * callbacks.
1452  *
1453  * The caller must have disabled interrupts.
1454  */
1455 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1456 {
1457 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1458 	unsigned long dj;
1459 
1460 	lockdep_assert_irqs_disabled();
1461 
1462 	/* Snapshot to detect later posting of non-lazy callback. */
1463 	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1464 
1465 	/* If no callbacks, RCU doesn't need the CPU. */
1466 	if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
1467 		*nextevt = KTIME_MAX;
1468 		return 0;
1469 	}
1470 
1471 	/* Attempt to advance callbacks. */
1472 	if (rcu_try_advance_all_cbs()) {
1473 		/* Some ready to invoke, so initiate later invocation. */
1474 		invoke_rcu_core();
1475 		return 1;
1476 	}
1477 	rdtp->last_accelerate = jiffies;
1478 
1479 	/* Request timer delay depending on laziness, and round. */
1480 	if (!rdtp->all_lazy) {
1481 		dj = round_up(rcu_idle_gp_delay + jiffies,
1482 			       rcu_idle_gp_delay) - jiffies;
1483 	} else {
1484 		dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
1485 	}
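	/*
	 * dj now holds the requested delay in jiffies.  In the non-lazy
	 * case, round_up(delay + jiffies, delay) - jiffies picks a wakeup
	 * time (jiffies + dj) that is a multiple of rcu_idle_gp_delay and
	 * at least one full delay in the future; in the lazy case,
	 * round_jiffies() aligns the wakeup roughly to a whole second.
	 * Either way, the rounding tends to make idle CPUs' wakeups
	 * coincide rather than spread out.
	 */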
1486 	*nextevt = basemono + dj * TICK_NSEC;
1487 	return 0;
1488 }
1489 
1490 /*
1491  * Prepare a CPU for idle from an RCU perspective.  The first major task
1492  * is to sense whether nohz mode has been enabled or disabled via sysfs.
1493  * The second major task is to check to see if a non-lazy callback has
1494  * arrived at a CPU that previously had only lazy callbacks.  The third
1495  * major task is to accelerate (that is, assign grace-period numbers to)
1496  * any recently arrived callbacks.
1497  *
1498  * The caller must have disabled interrupts.
1499  */
1500 static void rcu_prepare_for_idle(void)
1501 {
1502 	bool needwake;
1503 	struct rcu_data *rdp;
1504 	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
1505 	struct rcu_node *rnp;
1506 	struct rcu_state *rsp;
1507 	int tne;
1508 
1509 	lockdep_assert_irqs_disabled();
1510 	if (rcu_is_nocb_cpu(smp_processor_id()))
1511 		return;
1512 
1513 	/* Handle nohz enablement switches conservatively. */
1514 	tne = READ_ONCE(tick_nohz_active);
1515 	if (tne != rdtp->tick_nohz_enabled_snap) {
1516 		if (rcu_cpu_has_callbacks(NULL))
1517 			invoke_rcu_core(); /* force nohz to see update. */
1518 		rdtp->tick_nohz_enabled_snap = tne;
1519 		return;
1520 	}
1521 	if (!tne)
1522 		return;
1523 
1524 	/*
1525 	 * If a non-lazy callback arrived at a CPU having only lazy
1526 	 * callbacks, invoke RCU core for the side-effect of recalculating
1527 	 * idle duration on re-entry to idle.
1528 	 */
1529 	if (rdtp->all_lazy &&
1530 	    rdtp->nonlazy_posted != rdtp->nonlazy_posted_snap) {
1531 		rdtp->all_lazy = false;
1532 		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
1533 		invoke_rcu_core();
1534 		return;
1535 	}
1536 
1537 	/*
1538 	 * If we have not yet accelerated this jiffy, accelerate all
1539 	 * callbacks on this CPU.
1540 	 */
1541 	if (rdtp->last_accelerate == jiffies)
1542 		return;
1543 	rdtp->last_accelerate = jiffies;
1544 	for_each_rcu_flavor(rsp) {
1545 		rdp = this_cpu_ptr(rsp->rda);
1546 		if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1547 			continue;
1548 		rnp = rdp->mynode;
1549 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1550 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
1551 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1552 		if (needwake)
1553 			rcu_gp_kthread_wake(rsp);
1554 	}
1555 }
1556 
1557 /*
1558  * Clean up for exit from idle.  Attempt to advance callbacks based on
1559  * any grace periods that elapsed while the CPU was idle, and if any
1560  * callbacks are now ready to invoke, initiate invocation.
1561  */
1562 static void rcu_cleanup_after_idle(void)
1563 {
1564 	lockdep_assert_irqs_disabled();
1565 	if (rcu_is_nocb_cpu(smp_processor_id()))
1566 		return;
1567 	if (rcu_try_advance_all_cbs())
1568 		invoke_rcu_core();
1569 }
1570 
1571 /*
1572  * Keep a running count of the number of non-lazy callbacks posted
1573  * on this CPU.  This running counter (which is never decremented) allows
1574  * rcu_prepare_for_idle() to detect when something out of the idle loop
1575  * posts a callback, even if an equal number of callbacks are invoked.
1576  * Of course, callbacks should only be posted from within a trace event
1577  * designed to be called from idle or from within RCU_NONIDLE().
1578  */
1579 static void rcu_idle_count_callbacks_posted(void)
1580 {
1581 	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
1582 }
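
/*
 * For illustration (hypothetical values): if ->nonlazy_posted_snap was
 * 10 when this CPU last went idle and a wakeup handler then posts one
 * non-lazy callback, ->nonlazy_posted becomes 11.  On the next call to
 * rcu_prepare_for_idle(), the snapshot comparison notices the change,
 * clears ->all_lazy, and invokes the RCU core so that the idle duration
 * is recalculated on re-entry to idle.
 */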
1583 
1584 /*
1585  * Data for flushing lazy RCU callbacks at OOM time.
1586  */
1587 static atomic_t oom_callback_count;
1588 static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1589 
1590 /*
1591  * RCU OOM callback -- decrement the outstanding count and deliver the
1592  * wake-up if we are the last one.
1593  */
1594 static void rcu_oom_callback(struct rcu_head *rhp)
1595 {
1596 	if (atomic_dec_and_test(&oom_callback_count))
1597 		wake_up(&oom_callback_wq);
1598 }
1599 
1600 /*
1601  * Post an rcu_oom_notify callback on the current CPU if it has at
1602  * least one lazy callback.  This will unnecessarily post callbacks
1603  * to CPUs that already have a non-lazy callback at the end of their
1604  * callback list, but this is an infrequent operation, so accept some
1605  * extra overhead to keep things simple.
1606  */
1607 static void rcu_oom_notify_cpu(void *unused)
1608 {
1609 	struct rcu_state *rsp;
1610 	struct rcu_data *rdp;
1611 
1612 	for_each_rcu_flavor(rsp) {
1613 		rdp = raw_cpu_ptr(rsp->rda);
1614 		if (rcu_segcblist_n_lazy_cbs(&rdp->cblist)) {
1615 			atomic_inc(&oom_callback_count);
1616 			rsp->call(&rdp->oom_head, rcu_oom_callback);
1617 		}
1618 	}
1619 }
1620 
1621 /*
1622  * If low on memory, ensure that each CPU has a non-lazy callback.
1623  * This will wake up CPUs that have only lazy callbacks, in turn
1624  * ensuring that they free up the corresponding memory in a timely manner.
1625  * Because an uncertain amount of memory will be freed in some uncertain
1626  * timeframe, we do not claim to have freed anything.
1627  */
1628 static int rcu_oom_notify(struct notifier_block *self,
1629 			  unsigned long notused, void *nfreed)
1630 {
1631 	int cpu;
1632 
1633 	/* Wait for callbacks from earlier instance to complete. */
1634 	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1635 	smp_mb(); /* Ensure callback reuse happens after callback invocation. */
1636 
1637 	/*
1638 	 * Prevent premature wakeup: ensure that all increments happen
1639 	 * before there is a chance of the counter reaching zero.
1640 	 */
1641 	atomic_set(&oom_callback_count, 1);
1642 
1643 	for_each_online_cpu(cpu) {
1644 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1645 		cond_resched_tasks_rcu_qs();
1646 	}
1647 
1648 	/* Unconditionally decrement: no need to wake ourselves up. */
1649 	atomic_dec(&oom_callback_count);
1650 
1651 	return NOTIFY_OK;
1652 }
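
/*
 * Illustrative walk-through, assuming a single RCU flavor and two
 * online CPUs that each have only lazy callbacks queued:
 * rcu_oom_notify() sets oom_callback_count to 1, each CPU's
 * rcu_oom_notify_cpu() increments it (to 2, then 3), and the final
 * atomic_dec() removes the initial reference.  The count therefore
 * cannot reach zero until both rcu_oom_callback() invocations have
 * run, at which point a later rcu_oom_notify() waiting on
 * oom_callback_wq is allowed to proceed.
 */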
1653 
1654 static struct notifier_block rcu_oom_nb = {
1655 	.notifier_call = rcu_oom_notify
1656 };
1657 
1658 static int __init rcu_register_oom_notifier(void)
1659 {
1660 	register_oom_notifier(&rcu_oom_nb);
1661 	return 0;
1662 }
1663 early_initcall(rcu_register_oom_notifier);
1664 
1665 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1666 
1667 #ifdef CONFIG_RCU_FAST_NO_HZ
1668 
1669 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1670 {
1671 	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1672 	unsigned long nlpd = rdtp->nonlazy_posted - rdtp->nonlazy_posted_snap;
1673 
1674 	sprintf(cp, "last_accelerate: %04lx/%04lx, nonlazy_posted: %ld, %c%c",
1675 		rdtp->last_accelerate & 0xffff, jiffies & 0xffff,
1676 		ulong2long(nlpd),
1677 		rdtp->all_lazy ? 'L' : '.',
1678 		rdtp->tick_nohz_enabled_snap ? '.' : 'D');
1679 }
1680 
1681 #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
1682 
1683 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
1684 {
1685 	*cp = '\0';
1686 }
1687 
1688 #endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
1689 
1690 /* Initiate the stall-info list. */
1691 static void print_cpu_stall_info_begin(void)
1692 {
1693 	pr_cont("\n");
1694 }
1695 
1696 /*
1697  * Print out diagnostic information for the specified stalled CPU.
1698  *
1699  * If the specified CPU is aware of the current RCU grace period
1700  * (flavor specified by rsp), then print the number of scheduling
1701  * clock interrupts the CPU has taken during the time that it has
1702  * been aware.  Otherwise, print the number of RCU grace periods
1703  * that this CPU is ignorant of, for example, "1" if the CPU was
1704  * aware of the previous grace period.
1705  *
1706  * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
1707  */
1708 static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
1709 {
1710 	unsigned long delta;
1711 	char fast_no_hz[72];
1712 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1713 	struct rcu_dynticks *rdtp = rdp->dynticks;
1714 	char *ticks_title;
1715 	unsigned long ticks_value;
1716 
1717 	/*
1718 	 * We could be printing a lot while holding a spinlock.  Avoid
1719 	 * triggering hard lockup.
1720 	 */
1721 	touch_nmi_watchdog();
1722 
1723 	if (rsp->gpnum == rdp->gpnum) {
1724 		ticks_title = "ticks this GP";
1725 		ticks_value = rdp->ticks_this_gp;
1726 	} else {
1727 		ticks_title = "GPs behind";
1728 		ticks_value = rsp->gpnum - rdp->gpnum;
1729 	}
1730 	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
1731 	delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
1732 	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
1733 	       cpu,
1734 	       "O."[!!cpu_online(cpu)],
1735 	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
1736 	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
1737 	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
1738 			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
1739 				"!."[!delta],
1740 	       ticks_value, ticks_title,
1741 	       rcu_dynticks_snap(rdtp) & 0xfff,
1742 	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
1743 	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
1744 	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
1745 	       fast_no_hz);
1746 }
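
/*
 * A stall-info line emitted by the pr_err() above might look as
 * follows, with all values hypothetical:
 *
 *	3-...!: (5 GPs behind) idle=402/1/0 softirq=2145/2150 fqs=12 last_accelerate: 7a2c/7b01, nonlazy_posted: 3, .D
 *
 * Here "3" is the stalled CPU, the four flag characters encode online
 * status, the qsmaskinit/qsmaskinitnext bits, and the irq-work
 * diagnostic, and the trailing string comes from
 * print_cpu_stall_fast_no_hz().
 */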
1747 
1748 /* Terminate the stall-info list. */
1749 static void print_cpu_stall_info_end(void)
1750 {
1751 	pr_err("\t");
1752 }
1753 
1754 /* Zero ->ticks_this_gp for all flavors of RCU. */
1755 static void zero_cpu_stall_ticks(struct rcu_data *rdp)
1756 {
1757 	rdp->ticks_this_gp = 0;
1758 	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
1759 }
1760 
1761 /* Increment ->ticks_this_gp for all flavors of RCU. */
1762 static void increment_cpu_stall_ticks(void)
1763 {
1764 	struct rcu_state *rsp;
1765 
1766 	for_each_rcu_flavor(rsp)
1767 		raw_cpu_inc(rsp->rda->ticks_this_gp);
1768 }
1769 
1770 #ifdef CONFIG_RCU_NOCB_CPU
1771 
1772  * Offload callback processing from the set of CPUs specified at
1773  * boot time by rcu_nocb_mask.  For each CPU in the set, there is a
1774  * specified by rcu_nocb_mask.  For each CPU in the set, there is a
1775  * kthread created that pulls the callbacks from the corresponding CPU,
1776  * waits for a grace period to elapse, and invokes the callbacks.
1777  * The no-CBs CPUs do a wake_up() on their kthread when they insert
1778  * a callback into any empty list, unless the rcu_nocb_poll boot parameter
1779  * has been specified, in which case each kthread actively polls its
1780  * CPU.  (Which isn't so great for energy efficiency, but which does
1781  * reduce RCU's overhead on that CPU.)
1782  *
1783  * This is intended to be used in conjunction with Frederic Weisbecker's
1784  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1785  * running CPU-bound user-mode computations.
1786  *
1787  * Offloading of callback processing could also in theory be used as
1788  * an energy-efficiency measure because CPUs with no RCU callbacks
1789  * queued are more aggressive about entering dyntick-idle mode.
1790  */
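
/*
 * For example, booting with "rcu_nocbs=1-7" (a hypothetical CPU list)
 * would offload callback invocation for CPUs 1-7 to rcuo kthreads, and
 * adding "rcu_nocb_poll" would make those kthreads poll rather than
 * wait to be awakened by the offloaded CPUs.
 */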
1791 
1792 
1793 /* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
1794 static int __init rcu_nocb_setup(char *str)
1795 {
1796 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1797 	cpulist_parse(str, rcu_nocb_mask);
1798 	return 1;
1799 }
1800 __setup("rcu_nocbs=", rcu_nocb_setup);
1801 
1802 static int __init parse_rcu_nocb_poll(char *arg)
1803 {
1804 	rcu_nocb_poll = true;
1805 	return 0;
1806 }
1807 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
1808 
1809 /*
1810  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1811  * grace period.
1812  */
1813 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1814 {
1815 	swake_up_all(sq);
1816 }
1817 
1818 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1819 {
1820 	return &rnp->nocb_gp_wq[rnp->completed & 0x1];
1821 }
1822 
1823 static void rcu_init_one_nocb(struct rcu_node *rnp)
1824 {
1825 	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
1826 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
1827 }
1828 
1829 /* Is the specified CPU a no-CBs CPU? */
1830 bool rcu_is_nocb_cpu(int cpu)
1831 {
1832 	if (cpumask_available(rcu_nocb_mask))
1833 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
1834 	return false;
1835 }
1836 
1837 /*
1838  * Kick the leader kthread for this NOCB group.  Caller holds ->nocb_lock
1839  * and this function releases it.
1840  */
1841 static void __wake_nocb_leader(struct rcu_data *rdp, bool force,
1842 			       unsigned long flags)
1843 	__releases(rdp->nocb_lock)
1844 {
1845 	struct rcu_data *rdp_leader = rdp->nocb_leader;
1846 
1847 	lockdep_assert_held(&rdp->nocb_lock);
1848 	if (!READ_ONCE(rdp_leader->nocb_kthread)) {
1849 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1850 		return;
1851 	}
1852 	if (rdp_leader->nocb_leader_sleep || force) {
1853 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
1854 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
1855 		del_timer(&rdp->nocb_timer);
1856 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1857 		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
1858 		swake_up(&rdp_leader->nocb_wq);
1859 	} else {
1860 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1861 	}
1862 }
1863 
1864 /*
1865  * Kick the leader kthread for this NOCB group, but the caller has
1866  * not acquired any locks.
1867  */
1868 static void wake_nocb_leader(struct rcu_data *rdp, bool force)
1869 {
1870 	unsigned long flags;
1871 
1872 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1873 	__wake_nocb_leader(rdp, force, flags);
1874 }
1875 
1876 /*
1877  * Arrange to wake the leader kthread for this NOCB group at some
1878  * future time when it is safe to do so.
1879  */
1880 static void wake_nocb_leader_defer(struct rcu_data *rdp, int waketype,
1881 				   const char *reason)
1882 {
1883 	unsigned long flags;
1884 
1885 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1886 	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
1887 		mod_timer(&rdp->nocb_timer, jiffies + 1);
1888 	WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
1889 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, reason);
1890 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1891 }
1892 
1893 /*
1894  * Does the specified CPU need an RCU callback for the specified flavor
1895  * of rcu_barrier()?
1896  */
1897 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
1898 {
1899 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1900 	unsigned long ret;
1901 #ifdef CONFIG_PROVE_RCU
1902 	struct rcu_head *rhp;
1903 #endif /* #ifdef CONFIG_PROVE_RCU */
1904 
1905 	/*
1906 	 * Check count of all no-CBs callbacks awaiting invocation.
1907 	 * There needs to be a barrier before this function is called,
1908 	 * but associated with a prior determination that no more
1909 	 * callbacks would be posted.  In the worst case, the first
1910 	 * barrier in _rcu_barrier() suffices (but the caller cannot
1911 	 * necessarily rely on this; it is not a substitute for the caller
1912 	 * getting the concurrency design right!).  There must also be
1913 	 * a barrier between the following load and the posting of a callback
1914 	 * (if a callback is in fact needed).  This is associated with an
1915 	 * atomic_inc() in the caller.
1916 	 */
1917 	ret = atomic_long_read(&rdp->nocb_q_count);
1918 
1919 #ifdef CONFIG_PROVE_RCU
1920 	rhp = READ_ONCE(rdp->nocb_head);
1921 	if (!rhp)
1922 		rhp = READ_ONCE(rdp->nocb_gp_head);
1923 	if (!rhp)
1924 		rhp = READ_ONCE(rdp->nocb_follower_head);
1925 
1926 	/* Having no rcuo kthread but CBs after scheduler starts is bad! */
1927 	if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
1928 	    rcu_scheduler_fully_active) {
1929 		/* RCU callback enqueued before CPU first came online??? */
1930 		pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
1931 		       cpu, rhp->func);
1932 		WARN_ON_ONCE(1);
1933 	}
1934 #endif /* #ifdef CONFIG_PROVE_RCU */
1935 
1936 	return !!ret;
1937 }
1938 
1939 /*
1940  * Enqueue the specified string of rcu_head structures onto the specified
1941  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
1942  * string by rhp, and the tail of the string by rhtp.  The non-lazy/lazy
1943  * counts are supplied by rhcount and rhcount_lazy.
1944  *
1945  * If warranted, also wake up the kthread servicing this CPU's queues.
1946  */
1947 static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
1948 				    struct rcu_head *rhp,
1949 				    struct rcu_head **rhtp,
1950 				    int rhcount, int rhcount_lazy,
1951 				    unsigned long flags)
1952 {
1953 	int len;
1954 	struct rcu_head **old_rhpp;
1955 	struct task_struct *t;
1956 
1957 	/* Enqueue the callback on the nocb list and update counts. */
1958 	atomic_long_add(rhcount, &rdp->nocb_q_count);
1959 	/* rcu_barrier() relies on ->nocb_q_count add before xchg. */
1960 	old_rhpp = xchg(&rdp->nocb_tail, rhtp);
1961 	WRITE_ONCE(*old_rhpp, rhp);
1962 	atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
1963 	smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
1964 
1965 	/* If we are not being polled and there is a kthread, awaken it ... */
1966 	t = READ_ONCE(rdp->nocb_kthread);
1967 	if (rcu_nocb_poll || !t) {
1968 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1969 				    TPS("WakeNotPoll"));
1970 		return;
1971 	}
1972 	len = atomic_long_read(&rdp->nocb_q_count);
1973 	if (old_rhpp == &rdp->nocb_head) {
1974 		if (!irqs_disabled_flags(flags)) {
1975 			/* ... if queue was empty ... */
1976 			wake_nocb_leader(rdp, false);
1977 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1978 					    TPS("WakeEmpty"));
1979 		} else {
1980 			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE,
1981 					       TPS("WakeEmptyIsDeferred"));
1982 		}
1983 		rdp->qlen_last_fqs_check = 0;
1984 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
1985 		/* ... or if many callbacks queued. */
1986 		if (!irqs_disabled_flags(flags)) {
1987 			wake_nocb_leader(rdp, true);
1988 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
1989 					    TPS("WakeOvf"));
1990 		} else {
1991 			wake_nocb_leader_defer(rdp, RCU_NOCB_WAKE_FORCE,
1992 					       TPS("WakeOvfIsDeferred"));
1993 		}
1994 		rdp->qlen_last_fqs_check = LONG_MAX / 2;
1995 	} else {
1996 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeNot"));
1997 	}
1998 	return;
1999 }
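
/*
 * To summarize the wakeup policy above: a callback landing on an empty
 * queue wakes the leader immediately, or defers the wakeup via
 * ->nocb_timer if interrupts are disabled; a queue that has grown past
 * ->qlen_last_fqs_check + qhimark forces a wakeup in the same
 * immediate-or-deferred manner; and otherwise no wakeup is issued.
 */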
2000 
2001 /*
2002  * This is a helper for __call_rcu(), which invokes this when the normal
2003  * callback queue is inoperable.  If this is not a no-CBs CPU, this
2004  * function returns failure back to __call_rcu(), which can complain
2005  * appropriately.
2006  *
2007  * Otherwise, this function queues the callback where the corresponding
2008  * "rcuo" kthread can find it.
2009  */
2010 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2011 			    bool lazy, unsigned long flags)
2012 {
2013 
2014 	if (!rcu_is_nocb_cpu(rdp->cpu))
2015 		return false;
2016 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
2017 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
2018 		trace_rcu_kfree_callback(rdp->rsp->name, rhp,
2019 					 (unsigned long)rhp->func,
2020 					 -atomic_long_read(&rdp->nocb_q_count_lazy),
2021 					 -atomic_long_read(&rdp->nocb_q_count));
2022 	else
2023 		trace_rcu_callback(rdp->rsp->name, rhp,
2024 				   -atomic_long_read(&rdp->nocb_q_count_lazy),
2025 				   -atomic_long_read(&rdp->nocb_q_count));
2026 
2027 	/*
2028 	 * If called from an extended quiescent state with interrupts
2029 	 * disabled, invoke the RCU core in order to allow the idle-entry
2030 	 * deferred-wakeup check to function.
2031 	 */
2032 	if (irqs_disabled_flags(flags) &&
2033 	    !rcu_is_watching() &&
2034 	    cpu_online(smp_processor_id()))
2035 		invoke_rcu_core();
2036 
2037 	return true;
2038 }
2039 
2040 /*
2041  * Adopt orphaned callbacks on a no-CBs CPU, or return false if this is
2042  * not a no-CBs CPU.
2043  */
2044 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2045 						     struct rcu_data *rdp,
2046 						     unsigned long flags)
2047 {
2048 	lockdep_assert_irqs_disabled();
2049 	if (!rcu_is_nocb_cpu(smp_processor_id()))
2050 		return false; /* Not NOCBs CPU, caller must migrate CBs. */
2051 	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
2052 				rcu_segcblist_tail(&rdp->cblist),
2053 				rcu_segcblist_n_cbs(&rdp->cblist),
2054 				rcu_segcblist_n_lazy_cbs(&rdp->cblist), flags);
2055 	rcu_segcblist_init(&rdp->cblist);
2056 	rcu_segcblist_disable(&rdp->cblist);
2057 	return true;
2058 }
2059 
2060 /*
2061  * If necessary, kick off a new grace period, and either way wait
2062  * for a subsequent grace period to complete.
2063  */
2064 static void rcu_nocb_wait_gp(struct rcu_data *rdp)
2065 {
2066 	unsigned long c;
2067 	bool d;
2068 	unsigned long flags;
2069 	bool needwake;
2070 	struct rcu_node *rnp = rdp->mynode;
2071 
2072 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
2073 	c = rcu_cbs_completed(rdp->rsp, rnp);
2074 	needwake = rcu_start_this_gp(rnp, rdp, c);
2075 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2076 	if (needwake)
2077 		rcu_gp_kthread_wake(rdp->rsp);
2078 
2079 	/*
2080 	 * Wait for the grace period.  Do so interruptibly to avoid messing
2081 	 * up the load average.
2082 	 */
2083 	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
2084 	for (;;) {
2085 		swait_event_interruptible(
2086 			rnp->nocb_gp_wq[c & 0x1],
2087 			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
2088 		if (likely(d))
2089 			break;
2090 		WARN_ON(signal_pending(current));
2091 		trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
2092 	}
2093 	trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
2094 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
2095 }
2096 
2097 /*
2098  * Leaders come here to wait for additional callbacks to show up.
2099  * This function does not return until callbacks appear.
2100  */
2101 static void nocb_leader_wait(struct rcu_data *my_rdp)
2102 {
2103 	bool firsttime = true;
2104 	unsigned long flags;
2105 	bool gotcbs;
2106 	struct rcu_data *rdp;
2107 	struct rcu_head **tail;
2108 
2109 wait_again:
2110 
2111 	/* Wait for callbacks to appear. */
2112 	if (!rcu_nocb_poll) {
2113 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
2114 		swait_event_interruptible(my_rdp->nocb_wq,
2115 				!READ_ONCE(my_rdp->nocb_leader_sleep));
2116 		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2117 		my_rdp->nocb_leader_sleep = true;
2118 		WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2119 		del_timer(&my_rdp->nocb_timer);
2120 		raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2121 	} else if (firsttime) {
2122 		firsttime = false; /* Don't drown trace log with "Poll"! */
2123 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Poll"));
2124 	}
2125 
2126 	/*
2127 	 * Each pass through the following loop checks a follower for CBs.
2128 	 * We are our own first follower.  Any CBs found are moved to
2129 	 * nocb_gp_head, where they await a grace period.
2130 	 */
2131 	gotcbs = false;
2132 	smp_mb(); /* wakeup and _sleep before ->nocb_head reads. */
2133 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2134 		rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
2135 		if (!rdp->nocb_gp_head)
2136 			continue;  /* No CBs here, try next follower. */
2137 
2138 		/* Move callbacks to wait-for-GP list, which is empty. */
2139 		WRITE_ONCE(rdp->nocb_head, NULL);
2140 		rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2141 		gotcbs = true;
2142 	}
2143 
2144 	/* No callbacks?  Sleep a bit if polling, and go retry.  */
2145 	if (unlikely(!gotcbs)) {
2146 		WARN_ON(signal_pending(current));
2147 		if (rcu_nocb_poll) {
2148 			schedule_timeout_interruptible(1);
2149 		} else {
2150 			trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
2151 					    TPS("WokeEmpty"));
2152 		}
2153 		goto wait_again;
2154 	}
2155 
2156 	/* Wait for one grace period. */
2157 	rcu_nocb_wait_gp(my_rdp);
2158 
2159 	/* Each pass through the following loop wakes a follower, if needed. */
2160 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
2161 		if (!rcu_nocb_poll &&
2162 		    READ_ONCE(rdp->nocb_head) &&
2163 		    READ_ONCE(my_rdp->nocb_leader_sleep)) {
2164 			raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
2165 			my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
2166 			raw_spin_unlock_irqrestore(&my_rdp->nocb_lock, flags);
2167 		}
2168 		if (!rdp->nocb_gp_head)
2169 			continue; /* No CBs, so no need to wake follower. */
2170 
2171 		/* Append callbacks to follower's "done" list. */
2172 		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2173 		tail = rdp->nocb_follower_tail;
2174 		rdp->nocb_follower_tail = rdp->nocb_gp_tail;
2175 		*tail = rdp->nocb_gp_head;
2176 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2177 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
2178 			/* List was empty, so wake up the follower.  */
2179 			swake_up(&rdp->nocb_wq);
2180 		}
2181 	}
2182 
2183 	/* If we (the leader) don't have CBs, go wait some more. */
2184 	if (!my_rdp->nocb_follower_head)
2185 		goto wait_again;
2186 }
2187 
2188 /*
2189  * Followers come here to wait for additional callbacks to show up.
2190  * This function does not return until callbacks appear.
2191  */
2192 static void nocb_follower_wait(struct rcu_data *rdp)
2193 {
2194 	for (;;) {
2195 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
2196 		swait_event_interruptible(rdp->nocb_wq,
2197 					 READ_ONCE(rdp->nocb_follower_head));
2198 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
2199 			/* ^^^ Ensure CB invocation follows _head test. */
2200 			return;
2201 		}
2202 		WARN_ON(signal_pending(current));
2203 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeEmpty"));
2204 	}
2205 }
2206 
2207 /*
2208  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
2209  * callbacks queued by the corresponding no-CBs CPU; however, there is
2210  * an optional leader-follower relationship so that the grace-period
2211  * kthreads don't have to do quite so many wakeups.
2212  */
2213 static int rcu_nocb_kthread(void *arg)
2214 {
2215 	int c, cl;
2216 	unsigned long flags;
2217 	struct rcu_head *list;
2218 	struct rcu_head *next;
2219 	struct rcu_head **tail;
2220 	struct rcu_data *rdp = arg;
2221 
2222 	/* Each pass through this loop invokes one batch of callbacks */
2223 	for (;;) {
2224 		/* Wait for callbacks. */
2225 		if (rdp->nocb_leader == rdp)
2226 			nocb_leader_wait(rdp);
2227 		else
2228 			nocb_follower_wait(rdp);
2229 
2230 		/* Pull the ready-to-invoke callbacks onto local list. */
2231 		raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2232 		list = rdp->nocb_follower_head;
2233 		rdp->nocb_follower_head = NULL;
2234 		tail = rdp->nocb_follower_tail;
2235 		rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2236 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2237 		BUG_ON(!list);
2238 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WokeNonEmpty"));
2239 
2240 		/* Each pass through the following loop invokes a callback. */
2241 		trace_rcu_batch_start(rdp->rsp->name,
2242 				      atomic_long_read(&rdp->nocb_q_count_lazy),
2243 				      atomic_long_read(&rdp->nocb_q_count), -1);
2244 		c = cl = 0;
2245 		while (list) {
2246 			next = list->next;
2247 			/* Wait for enqueuing to complete, if needed. */
2248 			while (next == NULL && &list->next != tail) {
2249 				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2250 						    TPS("WaitQueue"));
2251 				schedule_timeout_interruptible(1);
2252 				trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
2253 						    TPS("WokeQueue"));
2254 				next = list->next;
2255 			}
2256 			debug_rcu_head_unqueue(list);
2257 			local_bh_disable();
2258 			if (__rcu_reclaim(rdp->rsp->name, list))
2259 				cl++;
2260 			c++;
2261 			local_bh_enable();
2262 			cond_resched_tasks_rcu_qs();
2263 			list = next;
2264 		}
2265 		trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2266 		smp_mb__before_atomic();  /* _add after CB invocation. */
2267 		atomic_long_add(-c, &rdp->nocb_q_count);
2268 		atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
2269 	}
2270 	return 0;
2271 }
2272 
2273 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
2274 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2275 {
2276 	return READ_ONCE(rdp->nocb_defer_wakeup);
2277 }
2278 
2279 /* Do a deferred wakeup of rcu_nocb_kthread(). */
2280 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
2281 {
2282 	unsigned long flags;
2283 	int ndw;
2284 
2285 	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
2286 	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
2287 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
2288 		return;
2289 	}
2290 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2291 	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
2292 	__wake_nocb_leader(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
2293 	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
2294 }
2295 
2296 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
2297 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
2298 {
2299 	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
2300 
2301 	do_nocb_deferred_wakeup_common(rdp);
2302 }
2303 
2304 /*
2305  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
2306  * This means we do an inexact common-case check.  Note that if
2307  * we miss, ->nocb_timer will eventually clean things up.
2308  */
2309 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2310 {
2311 	if (rcu_nocb_need_deferred_wakeup(rdp))
2312 		do_nocb_deferred_wakeup_common(rdp);
2313 }
2314 
2315 void __init rcu_init_nohz(void)
2316 {
2317 	int cpu;
2318 	bool need_rcu_nocb_mask = false;
2319 	struct rcu_state *rsp;
2320 
2321 #if defined(CONFIG_NO_HZ_FULL)
2322 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2323 		need_rcu_nocb_mask = true;
2324 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2325 
2326 	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
2327 		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2328 			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2329 			return;
2330 		}
2331 	}
2332 	if (!cpumask_available(rcu_nocb_mask))
2333 		return;
2334 
2335 #if defined(CONFIG_NO_HZ_FULL)
2336 	if (tick_nohz_full_running)
2337 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2338 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2339 
2340 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2341 		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
2342 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2343 			    rcu_nocb_mask);
2344 	}
2345 	if (cpumask_empty(rcu_nocb_mask))
2346 		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
2347 	else
2348 		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2349 			cpumask_pr_args(rcu_nocb_mask));
2350 	if (rcu_nocb_poll)
2351 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2352 
2353 	for_each_rcu_flavor(rsp) {
2354 		for_each_cpu(cpu, rcu_nocb_mask)
2355 			init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));
2356 		rcu_organize_nocb_kthreads(rsp);
2357 	}
2358 }
2359 
2360 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2361 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2362 {
2363 	rdp->nocb_tail = &rdp->nocb_head;
2364 	init_swait_queue_head(&rdp->nocb_wq);
2365 	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
2366 	raw_spin_lock_init(&rdp->nocb_lock);
2367 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
2368 }
2369 
2370 /*
2371  * If the specified CPU is a no-CBs CPU that does not already have its
2372  * rcuo kthread for the specified RCU flavor, spawn it.  If the CPUs are
2373  * brought online out of order, this can require re-organizing the
2374  * leader-follower relationships.
2375  */
2376 static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
2377 {
2378 	struct rcu_data *rdp;
2379 	struct rcu_data *rdp_last;
2380 	struct rcu_data *rdp_old_leader;
2381 	struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);
2382 	struct task_struct *t;
2383 
2384 	/*
2385 	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2386 	 * then nothing to do.
2387 	 */
2388 	if (!rcu_is_nocb_cpu(cpu) || rdp_spawn->nocb_kthread)
2389 		return;
2390 
2391 	/* If we didn't spawn the leader first, reorganize! */
2392 	rdp_old_leader = rdp_spawn->nocb_leader;
2393 	if (rdp_old_leader != rdp_spawn && !rdp_old_leader->nocb_kthread) {
2394 		rdp_last = NULL;
2395 		rdp = rdp_old_leader;
2396 		do {
2397 			rdp->nocb_leader = rdp_spawn;
2398 			if (rdp_last && rdp != rdp_spawn)
2399 				rdp_last->nocb_next_follower = rdp;
2400 			if (rdp == rdp_spawn) {
2401 				rdp = rdp->nocb_next_follower;
2402 			} else {
2403 				rdp_last = rdp;
2404 				rdp = rdp->nocb_next_follower;
2405 				rdp_last->nocb_next_follower = NULL;
2406 			}
2407 		} while (rdp);
2408 		rdp_spawn->nocb_next_follower = rdp_old_leader;
2409 	}
2410 
2411 	/* Spawn the kthread for this CPU and RCU flavor. */
2412 	t = kthread_run(rcu_nocb_kthread, rdp_spawn,
2413 			"rcuo%c/%d", rsp->abbr, cpu);
2414 	BUG_ON(IS_ERR(t));
2415 	WRITE_ONCE(rdp_spawn->nocb_kthread, t);
2416 }
2417 
2418 /*
2419  * If the specified CPU is a no-CBs CPU that does not already have its
2420  * rcuo kthreads, spawn them.
2421  */
2422 static void rcu_spawn_all_nocb_kthreads(int cpu)
2423 {
2424 	struct rcu_state *rsp;
2425 
2426 	if (rcu_scheduler_fully_active)
2427 		for_each_rcu_flavor(rsp)
2428 			rcu_spawn_one_nocb_kthread(rsp, cpu);
2429 }
2430 
2431 /*
2432  * Once the scheduler is running, spawn rcuo kthreads for all online
2433  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2434  * non-boot CPUs come online -- if this changes, we will need to add
2435  * some mutual exclusion.
2436  */
2437 static void __init rcu_spawn_nocb_kthreads(void)
2438 {
2439 	int cpu;
2440 
2441 	for_each_online_cpu(cpu)
2442 		rcu_spawn_all_nocb_kthreads(cpu);
2443 }
2444 
2445 /* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
2446 static int rcu_nocb_leader_stride = -1;
2447 module_param(rcu_nocb_leader_stride, int, 0444);
2448 
2449 /*
2450  * Initialize leader-follower relationships for all no-CBs CPUs.
2451  */
2452 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
2453 {
2454 	int cpu;
2455 	int ls = rcu_nocb_leader_stride;
2456 	int nl = 0;  /* Next leader. */
2457 	struct rcu_data *rdp;
2458 	struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
2459 	struct rcu_data *rdp_prev = NULL;
2460 
2461 	if (!cpumask_available(rcu_nocb_mask))
2462 		return;
2463 	if (ls == -1) {
2464 		ls = int_sqrt(nr_cpu_ids);
2465 		rcu_nocb_leader_stride = ls;
2466 	}
2467 
2468 	/*
2469 	 * Each pass through this loop sets up one rcu_data structure.
2470 	 * Should the corresponding CPU come online in the future, then
2471 	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2472 	 */
2473 	for_each_cpu(cpu, rcu_nocb_mask) {
2474 		rdp = per_cpu_ptr(rsp->rda, cpu);
2475 		if (rdp->cpu >= nl) {
2476 			/* New leader, set up for followers & next leader. */
2477 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2478 			rdp->nocb_leader = rdp;
2479 			rdp_leader = rdp;
2480 		} else {
2481 			/* Another follower, link to previous leader. */
2482 			rdp->nocb_leader = rdp_leader;
2483 			rdp_prev->nocb_next_follower = rdp;
2484 		}
2485 		rdp_prev = rdp;
2486 	}
2487 }
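
/*
 * Worked example with a hypothetical configuration: given nr_cpu_ids of
 * 64, all 64 CPUs present in rcu_nocb_mask, and the default stride of
 * -1, ls becomes int_sqrt(64) = 8.  The loop above then makes CPU 0 the
 * leader for CPUs 0-7, CPU 8 the leader for CPUs 8-15, and so on,
 * giving eight groups of eight CPUs each.
 */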
2488 
2489 /* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
2490 static bool init_nocb_callback_list(struct rcu_data *rdp)
2491 {
2492 	if (!rcu_is_nocb_cpu(rdp->cpu))
2493 		return false;
2494 
2495 	/* If there are early-boot callbacks, move them to nocb lists. */
2496 	if (!rcu_segcblist_empty(&rdp->cblist)) {
2497 		rdp->nocb_head = rcu_segcblist_head(&rdp->cblist);
2498 		rdp->nocb_tail = rcu_segcblist_tail(&rdp->cblist);
2499 		atomic_long_set(&rdp->nocb_q_count,
2500 				rcu_segcblist_n_cbs(&rdp->cblist));
2501 		atomic_long_set(&rdp->nocb_q_count_lazy,
2502 				rcu_segcblist_n_lazy_cbs(&rdp->cblist));
2503 		rcu_segcblist_init(&rdp->cblist);
2504 	}
2505 	rcu_segcblist_disable(&rdp->cblist);
2506 	return true;
2507 }
2508 
2509 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2510 
2511 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2512 {
2513 	WARN_ON_ONCE(1); /* Should be dead code. */
2514 	return false;
2515 }
2516 
2517 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
2518 {
2519 }
2520 
2521 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2522 {
2523 	return NULL;
2524 }
2525 
2526 static void rcu_init_one_nocb(struct rcu_node *rnp)
2527 {
2528 }
2529 
2530 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2531 			    bool lazy, unsigned long flags)
2532 {
2533 	return false;
2534 }
2535 
2536 static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
2537 						     struct rcu_data *rdp,
2538 						     unsigned long flags)
2539 {
2540 	return false;
2541 }
2542 
2543 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2544 {
2545 }
2546 
2547 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2548 {
2549 	return false;
2550 }
2551 
2552 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2553 {
2554 }
2555 
2556 static void rcu_spawn_all_nocb_kthreads(int cpu)
2557 {
2558 }
2559 
2560 static void __init rcu_spawn_nocb_kthreads(void)
2561 {
2562 }
2563 
2564 static bool init_nocb_callback_list(struct rcu_data *rdp)
2565 {
2566 	return false;
2567 }
2568 
2569 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2570 
2571 /*
2572  * An adaptive-ticks CPU can potentially execute in kernel mode for an
2573  * arbitrarily long period of time with the scheduling-clock tick turned
2574  * off.  RCU will be paying attention to this CPU because it is in the
2575  * kernel, but the CPU cannot be guaranteed to be executing the RCU state
2576  * machine because the scheduling-clock tick has been disabled.  Therefore,
2577  * if an adaptive-ticks CPU is failing to respond to the current grace
2578  * period and has not been idle from an RCU perspective, kick it.
2579  */
2580 static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2581 {
2582 #ifdef CONFIG_NO_HZ_FULL
2583 	if (tick_nohz_full_cpu(cpu))
2584 		smp_send_reschedule(cpu);
2585 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2586 }
2587 
2588 /*
2589  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2590  * grace-period kthread will do force_quiescent_state() processing?
2591  * The idea is to avoid waking up RCU core processing on such a
2592  * CPU unless the grace period has extended for too long.
2593  *
2594  * This code relies on the fact that all NO_HZ_FULL CPUs are also
2595  * CONFIG_RCU_NOCB_CPU CPUs.
2596  */
2597 static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
2598 {
2599 #ifdef CONFIG_NO_HZ_FULL
2600 	if (tick_nohz_full_cpu(smp_processor_id()) &&
2601 	    (!rcu_gp_in_progress(rsp) ||
2602 	     ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
2603 		return true;
2604 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2605 	return false;
2606 }
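
/*
 * In effect, a NO_HZ_FULL CPU skips RCU-core processing whenever no
 * grace period is in progress or the current grace period is less than
 * HZ jiffies (roughly one second) old; beyond that point the CPU
 * resumes normal processing rather than leaving everything to the
 * grace-period kthread's force_quiescent_state() work.
 */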
2607 
2608 /*
2609  * Bind the RCU grace-period kthreads to the housekeeping CPU.
2610  */
2611 static void rcu_bind_gp_kthread(void)
2612 {
2613 	int __maybe_unused cpu;
2614 
2615 	if (!tick_nohz_full_enabled())
2616 		return;
2617 	housekeeping_affine(current, HK_FLAG_RCU);
2618 }
2619 
2620 /* Record the current task on dyntick-idle entry. */
2621 static void rcu_dynticks_task_enter(void)
2622 {
2623 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2624 	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
2625 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2626 }
2627 
2628 /* Record no current task on dyntick-idle exit. */
2629 static void rcu_dynticks_task_exit(void)
2630 {
2631 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2632 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
2633 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2634 }
2635