1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 #include "rcu_segcblist.h"
10
11 ////////////////////////////////////////////////////////////////////////
12 //
13 // Generic data structures.
14
15 struct rcu_tasks;
16 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17 typedef void (*pregp_func_t)(struct list_head *hop);
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19 typedef void (*postscan_func_t)(struct list_head *hop);
20 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22
23 /**
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @lazy_timer: Timer to unlazify callbacks.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
31 * @rtp_work: Work queue for invoking callbacks.
32 * @rtp_irq_work: IRQ work queue for deferred wakeups.
33 * @barrier_q_head: RCU callback for barrier operation.
34 * @rtp_blkd_tasks: List of tasks blocked as readers.
35 * @cpu: CPU number corresponding to this entry.
36 * @rtpp: Pointer to the rcu_tasks structure.
37 */
38 struct rcu_tasks_percpu {
39 struct rcu_segcblist cblist;
40 raw_spinlock_t __private lock;
41 unsigned long rtp_jiffies;
42 unsigned long rtp_n_lock_retries;
43 struct timer_list lazy_timer;
44 unsigned int urgent_gp;
45 struct work_struct rtp_work;
46 struct irq_work rtp_irq_work;
47 struct rcu_head barrier_q_head;
48 struct list_head rtp_blkd_tasks;
49 int cpu;
50 struct rcu_tasks *rtpp;
51 };
52
53 /**
54 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
55 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
56 * @cbs_gbl_lock: Lock protecting callback list.
57 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
58 * @gp_func: This flavor's grace-period-wait function.
59 * @gp_state: Grace period's most recent state transition (debugging).
60 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
61 * @init_fract: Initial backoff sleep interval.
62 * @gp_jiffies: Time of last @gp_state transition.
63 * @gp_start: Most recent grace-period start in jiffies.
64 * @tasks_gp_seq: Number of grace periods completed since boot.
65 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
66 * @n_ipis_fails: Number of IPI-send failures.
67 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
68 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
69 * @pregp_func: This flavor's pre-grace-period function (optional).
70 * @pertask_func: This flavor's per-task scan function (optional).
71 * @postscan_func: This flavor's post-task scan function (optional).
72 * @holdouts_func: This flavor's holdout-list scan function (optional).
73 * @postgp_func: This flavor's post-grace-period function (optional).
74 * @call_func: This flavor's call_rcu()-equivalent function.
75 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
76 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
77 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
78 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
79 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
80 * @barrier_q_mutex: Serialize barrier operations.
81 * @barrier_q_count: Number of queues being waited on.
82 * @barrier_q_completion: Barrier wait/wakeup mechanism.
83 * @barrier_q_seq: Sequence number for barrier operations.
84 * @name: This flavor's textual name.
85 * @kname: This flavor's kthread name.
86 */
87 struct rcu_tasks {
88 struct rcuwait cbs_wait;
89 raw_spinlock_t cbs_gbl_lock;
90 struct mutex tasks_gp_mutex;
91 int gp_state;
92 int gp_sleep;
93 int init_fract;
94 unsigned long gp_jiffies;
95 unsigned long gp_start;
96 unsigned long tasks_gp_seq;
97 unsigned long n_ipis;
98 unsigned long n_ipis_fails;
99 struct task_struct *kthread_ptr;
100 unsigned long lazy_jiffies;
101 rcu_tasks_gp_func_t gp_func;
102 pregp_func_t pregp_func;
103 pertask_func_t pertask_func;
104 postscan_func_t postscan_func;
105 holdouts_func_t holdouts_func;
106 postgp_func_t postgp_func;
107 call_rcu_func_t call_func;
108 struct rcu_tasks_percpu __percpu *rtpcpu;
109 int percpu_enqueue_shift;
110 int percpu_enqueue_lim;
111 int percpu_dequeue_lim;
112 unsigned long percpu_dequeue_gpseq;
113 struct mutex barrier_q_mutex;
114 atomic_t barrier_q_count;
115 struct completion barrier_q_completion;
116 unsigned long barrier_q_seq;
117 char *name;
118 char *kname;
119 };
120
121 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
122
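// Note (editorial summary, not from the original): DEFINE_RCU_TASKS() below
// generates, for one Tasks-RCU flavor, a per-CPU struct rcu_tasks_percpu
// instance plus a single struct rcu_tasks that points at it. The initial
// enqueue shift and limit funnel all callback enqueueing to CPU 0's queue;
// cblist_init_generic() and runtime contention handling may widen this later.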
123 #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
124 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
125 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
126 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
127 }; \
128 static struct rcu_tasks rt_name = \
129 { \
130 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
131 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
132 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
133 .gp_func = gp, \
134 .call_func = call, \
135 .rtpcpu = &rt_name ## __percpu, \
136 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
137 .name = n, \
138 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
139 .percpu_enqueue_lim = 1, \
140 .percpu_dequeue_lim = 1, \
141 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
142 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
143 .kname = #rt_name, \
144 }
145
146 #ifdef CONFIG_TASKS_RCU
147 /* Track exiting tasks in order to allow them to be waited for. */
148 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
149
150 /* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
151 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
152 static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
153 #endif
154
155 /* Avoid IPIing CPUs early in the grace period. */
156 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
157 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
158 module_param(rcu_task_ipi_delay, int, 0644);
159
160 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
161 #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
162 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
163 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
164 module_param(rcu_task_stall_timeout, int, 0644);
165 #define RCU_TASK_STALL_INFO (HZ * 10)
166 static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
167 module_param(rcu_task_stall_info, int, 0644);
168 static int rcu_task_stall_info_mult __read_mostly = 3;
169 module_param(rcu_task_stall_info_mult, int, 0444);
170
171 static int rcu_task_enqueue_lim __read_mostly = -1;
172 module_param(rcu_task_enqueue_lim, int, 0444);
173
174 static bool rcu_task_cb_adjust;
175 static int rcu_task_contend_lim __read_mostly = 100;
176 module_param(rcu_task_contend_lim, int, 0444);
177 static int rcu_task_collapse_lim __read_mostly = 10;
178 module_param(rcu_task_collapse_lim, int, 0444);
179 static int rcu_task_lazy_lim __read_mostly = 32;
180 module_param(rcu_task_lazy_lim, int, 0444);
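// The module parameters above are boot-time settable. Because this header is
// included from kernel/rcu/update.c (which sets MODULE_PARAM_PREFIX to
// "rcupdate."), they appear on the kernel command line as, for example,
// rcupdate.rcu_task_stall_timeout=300000.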
181
182 /* RCU tasks grace-period state for debugging. */
183 #define RTGS_INIT 0
184 #define RTGS_WAIT_WAIT_CBS 1
185 #define RTGS_WAIT_GP 2
186 #define RTGS_PRE_WAIT_GP 3
187 #define RTGS_SCAN_TASKLIST 4
188 #define RTGS_POST_SCAN_TASKLIST 5
189 #define RTGS_WAIT_SCAN_HOLDOUTS 6
190 #define RTGS_SCAN_HOLDOUTS 7
191 #define RTGS_POST_GP 8
192 #define RTGS_WAIT_READERS 9
193 #define RTGS_INVOKE_CBS 10
194 #define RTGS_WAIT_CBS 11
195 #ifndef CONFIG_TINY_RCU
196 static const char * const rcu_tasks_gp_state_names[] = {
197 "RTGS_INIT",
198 "RTGS_WAIT_WAIT_CBS",
199 "RTGS_WAIT_GP",
200 "RTGS_PRE_WAIT_GP",
201 "RTGS_SCAN_TASKLIST",
202 "RTGS_POST_SCAN_TASKLIST",
203 "RTGS_WAIT_SCAN_HOLDOUTS",
204 "RTGS_SCAN_HOLDOUTS",
205 "RTGS_POST_GP",
206 "RTGS_WAIT_READERS",
207 "RTGS_INVOKE_CBS",
208 "RTGS_WAIT_CBS",
209 };
210 #endif /* #ifndef CONFIG_TINY_RCU */
211
212 ////////////////////////////////////////////////////////////////////////
213 //
214 // Generic code.
215
216 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
217
218 /* Record grace-period phase and time. */
219 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
220 {
221 rtp->gp_state = newstate;
222 rtp->gp_jiffies = jiffies;
223 }
224
225 #ifndef CONFIG_TINY_RCU
226 /* Return state name. */
227 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
228 {
229 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
230 int j = READ_ONCE(i); // Prevent the compiler from reading twice
231
232 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
233 return "???";
234 return rcu_tasks_gp_state_names[j];
235 }
236 #endif /* #ifndef CONFIG_TINY_RCU */
237
238 // Initialize per-CPU callback lists for the specified flavor of
239 // Tasks RCU. Do not enqueue callbacks before this function is invoked.
240 static void cblist_init_generic(struct rcu_tasks *rtp)
241 {
242 int cpu;
243 unsigned long flags;
244 int lim;
245 int shift;
246
247 if (rcu_task_enqueue_lim < 0) {
248 rcu_task_enqueue_lim = 1;
249 rcu_task_cb_adjust = true;
250 } else if (rcu_task_enqueue_lim == 0) {
251 rcu_task_enqueue_lim = 1;
252 }
253 lim = rcu_task_enqueue_lim;
254
255 if (lim > nr_cpu_ids)
256 lim = nr_cpu_ids;
257 shift = ilog2(nr_cpu_ids / lim);
258 if (((nr_cpu_ids - 1) >> shift) >= lim)
259 shift++;
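// For example (illustration only): with nr_cpu_ids = 6 and
// rcu_task_enqueue_lim = 4, lim is 4 and ilog2(6 / 4) is 0, but because
// (6 - 1) >> 0 is >= 4 the shift is bumped to 1, so CPUs 0-1 share
// queue 0, CPUs 2-3 share queue 1, and CPUs 4-5 share queue 2.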
260 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
261 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
262 smp_store_release(&rtp->percpu_enqueue_lim, lim);
263 for_each_possible_cpu(cpu) {
264 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
265
266 WARN_ON_ONCE(!rtpcp);
267 if (cpu)
268 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
269 local_irq_save(flags); // serialize initialization
270 if (rcu_segcblist_empty(&rtpcp->cblist))
271 rcu_segcblist_init(&rtpcp->cblist);
272 local_irq_restore(flags);
273 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
274 rtpcp->cpu = cpu;
275 rtpcp->rtpp = rtp;
276 if (!rtpcp->rtp_blkd_tasks.next)
277 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
278 }
279
280 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
281 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
282 }
283
284 // Compute wakeup time for lazy callback timer.
285 static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
286 {
287 return jiffies + rtp->lazy_jiffies;
288 }
289
290 // Timer handler that unlazifies lazy callbacks.
291 static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
292 {
293 unsigned long flags;
294 bool needwake = false;
295 struct rcu_tasks *rtp;
296 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
297
298 rtp = rtpcp->rtpp;
299 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
300 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
301 if (!rtpcp->urgent_gp)
302 rtpcp->urgent_gp = 1;
303 needwake = true;
304 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
305 }
306 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
307 if (needwake)
308 rcuwait_wake_up(&rtp->cbs_wait);
309 }
310
311 // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
312 static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
313 {
314 struct rcu_tasks *rtp;
315 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
316
317 rtp = rtpcp->rtpp;
318 rcuwait_wake_up(&rtp->cbs_wait);
319 }
320
321 // Enqueue a callback for the specified flavor of Tasks RCU.
322 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
323 struct rcu_tasks *rtp)
324 {
325 int chosen_cpu;
326 unsigned long flags;
327 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
328 int ideal_cpu;
329 unsigned long j;
330 bool needadjust = false;
331 bool needwake;
332 struct rcu_tasks_percpu *rtpcp;
333
334 rhp->next = NULL;
335 rhp->func = func;
336 local_irq_save(flags);
337 rcu_read_lock();
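// The enqueue shift maps this CPU onto one of the queues currently open
// for enqueueing (possibly shared with neighboring CPUs), and cpumask_next()
// then picks the first possible CPU at or after that ideal queue index.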
338 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
339 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
340 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
341 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
342 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
343 j = jiffies;
344 if (rtpcp->rtp_jiffies != j) {
345 rtpcp->rtp_jiffies = j;
346 rtpcp->rtp_n_lock_retries = 0;
347 }
348 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
349 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
350 needadjust = true; // Defer adjustment to avoid deadlock.
351 }
352 // Queuing callbacks before initialization not yet supported.
353 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
354 rcu_segcblist_init(&rtpcp->cblist);
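// Wake the grace-period kthread immediately for synchronous waiters
// (wakeme_after_rcu) or once rcu_task_lazy_lim callbacks have accumulated;
// otherwise, when laziness is enabled, let the lazy timer batch the wakeup.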
355 needwake = (func == wakeme_after_rcu) ||
356 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
357 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
358 if (rtp->lazy_jiffies)
359 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
360 else
361 needwake = rcu_segcblist_empty(&rtpcp->cblist);
362 }
363 if (needwake)
364 rtpcp->urgent_gp = 3;
365 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
366 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
367 if (unlikely(needadjust)) {
368 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
369 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
370 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
371 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
372 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
373 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
374 }
375 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
376 }
377 rcu_read_unlock();
378 /* We can't create the thread unless interrupts are enabled. */
379 if (needwake && READ_ONCE(rtp->kthread_ptr))
380 irq_work_queue(&rtpcp->rtp_irq_work);
381 }
382
383 // RCU callback function for rcu_barrier_tasks_generic().
384 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
385 {
386 struct rcu_tasks *rtp;
387 struct rcu_tasks_percpu *rtpcp;
388
389 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
390 rtp = rtpcp->rtpp;
391 if (atomic_dec_and_test(&rtp->barrier_q_count))
392 complete(&rtp->barrier_q_completion);
393 }
394
395 // Wait for all in-flight callbacks for the specified RCU Tasks flavor.
396 // Operates in a manner similar to rcu_barrier().
397 static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
398 {
399 int cpu;
400 unsigned long flags;
401 struct rcu_tasks_percpu *rtpcp;
402 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
403
404 mutex_lock(&rtp->barrier_q_mutex);
405 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
406 smp_mb();
407 mutex_unlock(&rtp->barrier_q_mutex);
408 return;
409 }
410 rcu_seq_start(&rtp->barrier_q_seq);
411 init_completion(&rtp->barrier_q_completion);
412 atomic_set(&rtp->barrier_q_count, 2);
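// The initial count of 2 keeps the completion from firing while callbacks
// are still being entrained below; it is subtracted back out once every
// in-use queue has been visited.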
413 for_each_possible_cpu(cpu) {
414 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
415 break;
416 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
417 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
418 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
419 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
420 atomic_inc(&rtp->barrier_q_count);
421 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
422 }
423 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
424 complete(&rtp->barrier_q_completion);
425 wait_for_completion(&rtp->barrier_q_completion);
426 rcu_seq_end(&rtp->barrier_q_seq);
427 mutex_unlock(&rtp->barrier_q_mutex);
428 }
429
430 // Advance callbacks and indicate whether either a grace period or
431 // callback invocation is needed.
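// The return value is a bit mask: 0x1 means that callbacks are ready to
// be invoked and 0x2 means that a new grace period is needed.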
432 static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
433 {
434 int cpu;
435 unsigned long flags;
436 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
437 long n;
438 long ncbs = 0;
439 long ncbsnz = 0;
440 int needgpcb = 0;
441
442 for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
443 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
444
445 /* Advance and accelerate any new callbacks. */
446 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
447 continue;
448 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
449 // Should we shrink down to a single callback queue?
450 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
451 if (n) {
452 ncbs += n;
453 if (cpu > 0)
454 ncbsnz += n;
455 }
456 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
457 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
458 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
459 if (rtp->lazy_jiffies)
460 rtpcp->urgent_gp--;
461 needgpcb |= 0x3;
462 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
463 rtpcp->urgent_gp = 0;
464 }
465 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
466 needgpcb |= 0x1;
467 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
468 }
469
470 // Shrink down to a single callback queue if appropriate.
471 // This is done in two stages: (1) If there are no more than
472 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
473 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
474 // if there has not been an increase in callbacks, limit dequeuing
475 // to CPU 0. Note the matching RCU read-side critical section in
476 // call_rcu_tasks_generic().
477 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
478 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
479 if (rtp->percpu_enqueue_lim > 1) {
480 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
481 smp_store_release(&rtp->percpu_enqueue_lim, 1);
482 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
483 gpdone = false;
484 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
485 }
486 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
487 }
488 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
489 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
490 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
491 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
492 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
493 }
494 if (rtp->percpu_dequeue_lim == 1) {
495 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
496 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
497
498 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
499 }
500 }
501 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
502 }
503
504 return needgpcb;
505 }
506
507 // Advance callbacks and invoke any that are ready.
508 static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
509 {
510 int cpu;
511 int cpunext;
512 int cpuwq;
513 unsigned long flags;
514 int len;
515 struct rcu_head *rhp;
516 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
517 struct rcu_tasks_percpu *rtpcp_next;
518
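// Fan out callback invocation as a binary tree: the handler for queue
// "cpu" kicks off workqueue handlers for queues 2*cpu+1 and 2*cpu+2 (when
// they are within the dequeue limit), which in turn kick off their own
// children, bounding the work done by any one handler.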
519 cpu = rtpcp->cpu;
520 cpunext = cpu * 2 + 1;
521 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
522 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
523 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
524 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
525 cpunext++;
526 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
527 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
528 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
529 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
530 }
531 }
532
533 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
534 return;
535 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
536 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
537 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
538 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
539 len = rcl.len;
540 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
541 debug_rcu_head_callback(rhp);
542 local_bh_disable();
543 rhp->func(rhp);
544 local_bh_enable();
545 cond_resched();
546 }
547 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
548 rcu_segcblist_add_len(&rtpcp->cblist, -len);
549 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
550 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
551 }
552
553 // Workqueue flood to advance callbacks and invoke any that are ready.
554 static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
555 {
556 struct rcu_tasks *rtp;
557 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
558
559 rtp = rtpcp->rtpp;
560 rcu_tasks_invoke_cbs(rtp, rtpcp);
561 }
562
563 // Wait for one grace period.
564 static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
565 {
566 int needgpcb;
567
568 mutex_lock(&rtp->tasks_gp_mutex);
569
570 // Unless called during the mid-boot dead zone, wait for a callback to arrive.
571 if (unlikely(midboot)) {
572 needgpcb = 0x2;
573 } else {
574 mutex_unlock(&rtp->tasks_gp_mutex);
575 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
576 rcuwait_wait_event(&rtp->cbs_wait,
577 (needgpcb = rcu_tasks_need_gpcb(rtp)),
578 TASK_IDLE);
579 mutex_lock(&rtp->tasks_gp_mutex);
580 }
581
582 if (needgpcb & 0x2) {
583 // Wait for one grace period.
584 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
585 rtp->gp_start = jiffies;
586 rcu_seq_start(&rtp->tasks_gp_seq);
587 rtp->gp_func(rtp);
588 rcu_seq_end(&rtp->tasks_gp_seq);
589 }
590
591 // Invoke callbacks.
592 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
593 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
594 mutex_unlock(&rtp->tasks_gp_mutex);
595 }
596
597 // RCU-tasks kthread that detects grace periods and invokes callbacks.
598 static int __noreturn rcu_tasks_kthread(void *arg)
599 {
600 int cpu;
601 struct rcu_tasks *rtp = arg;
602
603 for_each_possible_cpu(cpu) {
604 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
605
606 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
607 rtpcp->urgent_gp = 1;
608 }
609
610 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
611 housekeeping_affine(current, HK_TYPE_RCU);
612 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
613
614 /*
615 * Each pass through the following loop makes one check for
616 * newly arrived callbacks, and, if there are some, waits for
617 * one RCU-tasks grace period and then invokes the callbacks.
618 * This loop is terminated by the system going down. ;-)
619 */
620 for (;;) {
621 // Wait for one grace period and invoke any callbacks
622 // that are ready.
623 rcu_tasks_one_gp(rtp, false);
624
625 // Paranoid sleep to keep this from entering a tight loop.
626 schedule_timeout_idle(rtp->gp_sleep);
627 }
628 }
629
630 // Wait for a grace period for the specified flavor of Tasks RCU.
631 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
632 {
633 /* Complain if the scheduler has not started. */
634 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
635 "synchronize_%s() called too soon", rtp->name))
636 return;
637
638 // If the grace-period kthread is running, use it.
639 if (READ_ONCE(rtp->kthread_ptr)) {
640 wait_rcu_gp(rtp->call_func);
641 return;
642 }
643 rcu_tasks_one_gp(rtp, true);
644 }
645
646 /* Spawn RCU-tasks grace-period kthread. */
647 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
648 {
649 struct task_struct *t;
650
651 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
652 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
653 return;
654 smp_mb(); /* Ensure others see full kthread. */
655 }
656
657 #ifndef CONFIG_TINY_RCU
658
659 /*
660 * Print any non-default Tasks RCU settings.
661 */
662 static void __init rcu_tasks_bootup_oddness(void)
663 {
664 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
665 int rtsimc;
666
667 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
668 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
669 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
670 if (rtsimc != rcu_task_stall_info_mult) {
671 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
672 rcu_task_stall_info_mult = rtsimc;
673 }
674 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
675 #ifdef CONFIG_TASKS_RCU
676 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
677 #endif /* #ifdef CONFIG_TASKS_RCU */
678 #ifdef CONFIG_TASKS_RUDE_RCU
679 pr_info("\tRude variant of Tasks RCU enabled.\n");
680 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
681 #ifdef CONFIG_TASKS_TRACE_RCU
682 pr_info("\tTracing variant of Tasks RCU enabled.\n");
683 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
684 }
685
686 #endif /* #ifndef CONFIG_TINY_RCU */
687
688 #ifndef CONFIG_TINY_RCU
689 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
690 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
691 {
692 int cpu;
693 bool havecbs = false;
694 bool haveurgent = false;
695 bool haveurgentcbs = false;
696
697 for_each_possible_cpu(cpu) {
698 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
699
700 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
701 havecbs = true;
702 if (data_race(rtpcp->urgent_gp))
703 haveurgent = true;
704 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
705 haveurgentcbs = true;
706 if (havecbs && haveurgent && haveurgentcbs)
707 break;
708 }
709 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
710 rtp->kname,
711 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
712 jiffies - data_race(rtp->gp_jiffies),
713 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
714 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
715 ".k"[!!data_race(rtp->kthread_ptr)],
716 ".C"[havecbs],
717 ".u"[haveurgent],
718 ".U"[haveurgentcbs],
719 rtp->lazy_jiffies,
720 s);
721 }
722 #endif // #ifndef CONFIG_TINY_RCU
723
724 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
725
726 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
727
728 ////////////////////////////////////////////////////////////////////////
729 //
730 // Shared code between task-list-scanning variants of Tasks RCU.
731
732 /* Wait for one RCU-tasks grace period. */
733 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
734 {
735 struct task_struct *g;
736 int fract;
737 LIST_HEAD(holdouts);
738 unsigned long j;
739 unsigned long lastinfo;
740 unsigned long lastreport;
741 bool reported = false;
742 int rtsi;
743 struct task_struct *t;
744
745 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
746 rtp->pregp_func(&holdouts);
747
748 /*
749 * There were callbacks, so we need to wait for an RCU-tasks
750 * grace period. Start off by scanning the task list for tasks
751 * that are not already voluntarily blocked. Mark these tasks
752 * and make a list of them in holdouts.
753 */
754 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
755 if (rtp->pertask_func) {
756 rcu_read_lock();
757 for_each_process_thread(g, t)
758 rtp->pertask_func(t, &holdouts);
759 rcu_read_unlock();
760 }
761
762 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
763 rtp->postscan_func(&holdouts);
764
765 /*
766 * Each pass through the following loop scans the list of holdout
767 * tasks, removing any that are no longer holdouts. When the list
768 * is empty, we are done.
769 */
770 lastreport = jiffies;
771 lastinfo = lastreport;
772 rtsi = READ_ONCE(rcu_task_stall_info);
773
774 // Start off with initial wait and slowly back off to 1 HZ wait.
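// (Illustration: the classic flavor sets init_fract to HZ / 10, so the first
// wait is roughly 100 ms; each subsequent pass adds one jiffy, capping at a
// one-second wait per pass.)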
775 fract = rtp->init_fract;
776
777 while (!list_empty(&holdouts)) {
778 ktime_t exp;
779 bool firstreport;
780 bool needreport;
781 int rtst;
782
783 // Slowly back off waiting for holdouts
784 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
785 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
786 schedule_timeout_idle(fract);
787 } else {
788 exp = jiffies_to_nsecs(fract);
789 __set_current_state(TASK_IDLE);
790 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
791 }
792
793 if (fract < HZ)
794 fract++;
795
796 rtst = READ_ONCE(rcu_task_stall_timeout);
797 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
798 if (needreport) {
799 lastreport = jiffies;
800 reported = true;
801 }
802 firstreport = true;
803 WARN_ON(signal_pending(current));
804 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
805 rtp->holdouts_func(&holdouts, needreport, &firstreport);
806
807 // Print pre-stall informational messages if needed.
808 j = jiffies;
809 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
810 lastinfo = j;
811 rtsi = rtsi * rcu_task_stall_info_mult;
812 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
813 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
814 }
815 }
816
817 set_tasks_gp_state(rtp, RTGS_POST_GP);
818 rtp->postgp_func(rtp);
819 }
820
821 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
822
823 #ifdef CONFIG_TASKS_RCU
824
825 ////////////////////////////////////////////////////////////////////////
826 //
827 // Simple variant of RCU whose quiescent states are voluntary context
828 // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
829 // As such, grace periods can take one good long time. There are no
830 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
831 // because this implementation is intended to get the system into a safe
832 // state for some of the manipulations involved in tracing and the like.
833 // Finally, this implementation does not support high call_rcu_tasks()
834 // rates from multiple CPUs. If this is required, per-CPU callback lists
835 // will be needed.
836 //
837 // The implementation uses rcu_tasks_wait_gp(), which relies on function
838 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
839 // function sets these function pointers up so that rcu_tasks_wait_gp()
840 // invokes these functions in this order:
841 //
842 // rcu_tasks_pregp_step():
843 // Invokes synchronize_rcu() in order to wait for all in-flight
844 // t->on_rq and t->nvcsw transitions to complete. This works because
845 // all such transitions are carried out with interrupts disabled.
846 // rcu_tasks_pertask(), invoked on every non-idle task:
847 // For every runnable non-idle task other than the current one, use
848 // get_task_struct() to pin down that task, snapshot that task's
849 // number of voluntary context switches, and add that task to the
850 // holdout list.
851 // rcu_tasks_postscan():
852 // Invoke synchronize_srcu() to ensure that all tasks that were
853 // in the process of exiting (and which thus might not know to
854 // synchronize with this RCU Tasks grace period) have completed
855 // exiting.
856 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
857 // Scans the holdout list, attempting to identify a quiescent state
858 // for each task on the list. If there is a quiescent state, the
859 // corresponding task is removed from the holdout list.
860 // rcu_tasks_postgp():
861 // Invokes synchronize_rcu() in order to ensure that all prior
862 // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
863 // to have happened before the end of this RCU Tasks grace period.
864 // Again, this works because all such transitions are carried out
865 // with interrupts disabled.
866 //
867 // For each exiting task, the exit_tasks_rcu_start() and
868 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
869 // read-side critical sections waited for by rcu_tasks_postscan().
870 //
871 // Pre-grace-period update-side code is ordered before the grace
872 // period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
873 // is ordered before the grace period via synchronize_rcu() call in
874 // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
875 // disabling.
876
877 /* Pre-grace-period preparation. */
878 static void rcu_tasks_pregp_step(struct list_head *hop)
879 {
880 /*
881 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
882 * to complete. Invoking synchronize_rcu() suffices because all
883 * these transitions occur with interrupts disabled. Without this
884 * synchronize_rcu(), a read-side critical section that started
885 * before the grace period might be incorrectly seen as having
886 * started after the grace period.
887 *
888 * This synchronize_rcu() also dispenses with the need for a
889 * memory barrier on the first store to t->rcu_tasks_holdout,
890 * as it forces the store to happen after the beginning of the
891 * grace period.
892 */
893 synchronize_rcu();
894 }
895
896 /* Check for quiescent states since the pregp's synchronize_rcu() */
897 static bool rcu_tasks_is_holdout(struct task_struct *t)
898 {
899 int cpu;
900
901 /* Has the task been seen voluntarily sleeping? */
902 if (!READ_ONCE(t->on_rq))
903 return false;
904
905 /*
906 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
907 * quiescent states. But CPU boot code performed by the idle task
908 * isn't a quiescent state.
909 */
910 if (is_idle_task(t))
911 return false;
912
913 cpu = task_cpu(t);
914
915 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
916 if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
917 return false;
918
919 return true;
920 }
921
922 /* Per-task initial processing. */
923 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
924 {
925 if (t != current && rcu_tasks_is_holdout(t)) {
926 get_task_struct(t);
927 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
928 WRITE_ONCE(t->rcu_tasks_holdout, true);
929 list_add(&t->rcu_tasks_holdout_list, hop);
930 }
931 }
932
933 /* Processing between scanning taskslist and draining the holdout list. */
934 static void rcu_tasks_postscan(struct list_head *hop)
935 {
936 int rtsi = READ_ONCE(rcu_task_stall_info);
937
938 if (!IS_ENABLED(CONFIG_TINY_RCU)) {
939 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
940 add_timer(&tasks_rcu_exit_srcu_stall_timer);
941 }
942
943 /*
944 * Exiting tasks may escape the tasklist scan. Those are vulnerable
945 * until their final schedule() with TASK_DEAD state. To cope with
946 * this, divide the fragile exit path part in two intersecting
947 * read side critical sections:
948 *
949 * 1) An _SRCU_ read side starting before calling exit_notify(),
950 * which may remove the task from the tasklist, and ending after
951 * the final preempt_disable() call in do_exit().
952 *
953 * 2) An _RCU_ read side starting with the final preempt_disable()
954 * call in do_exit() and ending with the final call to schedule()
955 * with TASK_DEAD state.
956 *
957 * This handles the part 1). And postgp will handle part 2) with a
958 * call to synchronize_rcu().
959 */
960 synchronize_srcu(&tasks_rcu_exit_srcu);
961
962 if (!IS_ENABLED(CONFIG_TINY_RCU))
963 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
964 }
965
966 /* See if tasks are still holding out, complain if so. */
967 static void check_holdout_task(struct task_struct *t,
968 bool needreport, bool *firstreport)
969 {
970 int cpu;
971
972 if (!READ_ONCE(t->rcu_tasks_holdout) ||
973 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
974 !rcu_tasks_is_holdout(t) ||
975 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
976 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
977 WRITE_ONCE(t->rcu_tasks_holdout, false);
978 list_del_init(&t->rcu_tasks_holdout_list);
979 put_task_struct(t);
980 return;
981 }
982 rcu_request_urgent_qs_task(t);
983 if (!needreport)
984 return;
985 if (*firstreport) {
986 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
987 *firstreport = false;
988 }
989 cpu = task_cpu(t);
990 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
991 t, ".I"[is_idle_task(t)],
992 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
993 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
994 t->rcu_tasks_idle_cpu, cpu);
995 sched_show_task(t);
996 }
997
998 /* Scan the holdout lists for tasks no longer holding out. */
999 static void check_all_holdout_tasks(struct list_head *hop,
1000 bool needreport, bool *firstreport)
1001 {
1002 struct task_struct *t, *t1;
1003
1004 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1005 check_holdout_task(t, needreport, firstreport);
1006 cond_resched();
1007 }
1008 }
1009
1010 /* Finish off the Tasks-RCU grace period. */
1011 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1012 {
1013 /*
1014 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
1015 * memory barrier prior to them in the schedule() path, memory
1016 * reordering on other CPUs could cause their RCU-tasks read-side
1017 * critical sections to extend past the end of the grace period.
1018 * However, because these ->nvcsw updates are carried out with
1019 * interrupts disabled, we can use synchronize_rcu() to force the
1020 * needed ordering on all such CPUs.
1021 *
1022 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1023 * accesses to be within the grace period, avoiding the need for
1024 * memory barriers for ->rcu_tasks_holdout accesses.
1025 *
1026 * In addition, this synchronize_rcu() waits for exiting tasks
1027 * to complete their final preempt_disable() region of execution,
1028 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
1029 * thereby making the whole region from before tasklist removal until
1030 * the final schedule() with TASK_DEAD state behave as an RCU Tasks
1031 * read-side critical section.
1032 */
1033 synchronize_rcu();
1034 }
1035
1036 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1037 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
1038
1039 static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1040 {
1041 #ifndef CONFIG_TINY_RCU
1042 int rtsi;
1043
1044 rtsi = READ_ONCE(rcu_task_stall_info);
1045 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1046 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1047 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1048 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1049 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1050 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1051 #endif // #ifndef CONFIG_TINY_RCU
1052 }
1053
1054 /**
1055 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1056 * @rhp: structure to be used for queueing the RCU updates.
1057 * @func: actual callback function to be invoked after the grace period
1058 *
1059 * The callback function will be invoked some time after a full grace
1060 * period elapses, in other words after all currently executing RCU
1061 * read-side critical sections have completed. call_rcu_tasks() assumes
1062 * that the read-side critical sections end at a voluntary context
1063 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1064 * or transition to usermode execution. As such, there are no read-side
1065 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1066 * this primitive is intended to determine that all tasks have passed
1067 * through a safe state, not so much for data-structure synchronization.
1068 *
1069 * See the description of call_rcu() for more detailed information on
1070 * memory ordering guarantees.
1071 */
1072 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1073 {
1074 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1075 }
1076 EXPORT_SYMBOL_GPL(call_rcu_tasks);
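// Illustrative sketch only (the names below are hypothetical and not part of
// this file): a tracer that has just unlinked a trampoline could defer its
// freeing until every task has passed through a voluntary context switch:
//
//	static void my_tramp_free_cb(struct rcu_head *rhp)
//	{
//		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
//
//		kfree(tp);
//	}
//
//	/* After making the trampoline unreachable to new callers: */
//	call_rcu_tasks(&tp->rh, my_tramp_free_cb);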
1077
1078 /**
1079 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1080 *
1081 * Control will return to the caller some time after a full rcu-tasks
1082 * grace period has elapsed, in other words after all currently
1083 * executing rcu-tasks read-side critical sections have completed. These
1084 * read-side critical sections are delimited by calls to schedule(),
1085 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1086 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1087 *
1088 * This is a very specialized primitive, intended only for a few uses in
1089 * tracing and other situations requiring manipulation of function
1090 * preambles and profiling hooks. The synchronize_rcu_tasks() function
1091 * is not (yet) intended for heavy use from multiple CPUs.
1092 *
1093 * See the description of synchronize_rcu() for more detailed information
1094 * on memory ordering guarantees.
1095 */
1096 void synchronize_rcu_tasks(void)
1097 {
1098 synchronize_rcu_tasks_generic(&rcu_tasks);
1099 }
1100 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
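// Illustrative sketch only (my_old_tramp and unregister_my_trampoline() are
// hypothetical): the synchronous form suits callers that can afford to block:
//
//	unregister_my_trampoline(my_old_tramp);
//	synchronize_rcu_tasks();	/* Wait for all tasks to pass a QS. */
//	kfree(my_old_tramp);		/* Now safe to free. */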
1101
1102 /**
1103 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1104 *
1105 * Although the current implementation is guaranteed to wait, it is not
1106 * obligated to, for example, if there are no pending callbacks.
1107 */
1108 void rcu_barrier_tasks(void)
1109 {
1110 rcu_barrier_tasks_generic(&rcu_tasks);
1111 }
1112 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
1113
1114 int rcu_tasks_lazy_ms = -1;
1115 module_param(rcu_tasks_lazy_ms, int, 0444);
1116
1117 static int __init rcu_spawn_tasks_kthread(void)
1118 {
1119 cblist_init_generic(&rcu_tasks);
1120 rcu_tasks.gp_sleep = HZ / 10;
1121 rcu_tasks.init_fract = HZ / 10;
1122 if (rcu_tasks_lazy_ms >= 0)
1123 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1124 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1125 rcu_tasks.pertask_func = rcu_tasks_pertask;
1126 rcu_tasks.postscan_func = rcu_tasks_postscan;
1127 rcu_tasks.holdouts_func = check_all_holdout_tasks;
1128 rcu_tasks.postgp_func = rcu_tasks_postgp;
1129 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1130 return 0;
1131 }
1132
1133 #if !defined(CONFIG_TINY_RCU)
1134 void show_rcu_tasks_classic_gp_kthread(void)
1135 {
1136 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1137 }
1138 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1139 #endif // !defined(CONFIG_TINY_RCU)
1140
1141 struct task_struct *get_rcu_tasks_gp_kthread(void)
1142 {
1143 return rcu_tasks.kthread_ptr;
1144 }
1145 EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1146
1147 /*
1148 * Contribute to protect against tasklist scan blind spot while the
1149 * task is exiting and may be removed from the tasklist. See
1150 * corresponding synchronize_srcu() for further details.
1151 */
1152 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
1153 {
1154 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
1155 }
1156
1157 /*
1158 * Contribute to protect against tasklist scan blind spot while the
1159 * task is exiting and may be removed from the tasklist. See
1160 * corresponding synchronize_srcu() for further details.
1161 */
1162 void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
1163 {
1164 struct task_struct *t = current;
1165
1166 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1167 }
1168
1169 /*
1170 * Contribute to protect against tasklist scan blind spot while the
1171 * task is exiting and may be removed from the tasklist. See
1172 * corresponding synchronize_srcu() for further details.
1173 */
1174 void exit_tasks_rcu_finish(void)
1175 {
1176 exit_tasks_rcu_stop();
1177 exit_tasks_rcu_finish_trace(current);
1178 }
1179
1180 #else /* #ifdef CONFIG_TASKS_RCU */
1181 void exit_tasks_rcu_start(void) { }
1182 void exit_tasks_rcu_stop(void) { }
1183 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1184 #endif /* #else #ifdef CONFIG_TASKS_RCU */
1185
1186 #ifdef CONFIG_TASKS_RUDE_RCU
1187
1188 ////////////////////////////////////////////////////////////////////////
1189 //
1190 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1191 // passing an empty function to schedule_on_each_cpu(). This approach
1192 // provides an asynchronous call_rcu_tasks_rude() API and batching of
1193 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1194 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1195 // and induces otherwise unnecessary context switches on all online CPUs,
1196 // whether idle or not.
1197 //
1198 // Callback handling is provided by the rcu_tasks_kthread() function.
1199 //
1200 // Ordering is provided by the scheduler's context-switch code.
1201
1202 // Empty function to allow workqueues to force a context switch.
1203 static void rcu_tasks_be_rude(struct work_struct *work)
1204 {
1205 }
1206
1207 // Wait for one rude RCU-tasks grace period.
1208 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1209 {
1210 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1211 schedule_on_each_cpu(rcu_tasks_be_rude);
1212 }
1213
1214 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1215 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1216 "RCU Tasks Rude");
1217
1218 /**
1219 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1220 * @rhp: structure to be used for queueing the RCU updates.
1221 * @func: actual callback function to be invoked after the grace period
1222 *
1223 * The callback function will be invoked some time after a full grace
1224 * period elapses, in other words after all currently executing RCU
1225 * read-side critical sections have completed. call_rcu_tasks_rude()
1226 * assumes that the read-side critical sections end at context switch,
1227 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1228 * usermode execution is schedulable). As such, there are no read-side
1229 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1230 * this primitive is intended to determine that all tasks have passed
1231 * through a safe state, not so much for data-structure synchronization.
1232 *
1233 * See the description of call_rcu() for more detailed information on
1234 * memory ordering guarantees.
1235 */
1236 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1237 {
1238 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1239 }
1240 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1241
1242 /**
1243 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1244 *
1245 * Control will return to the caller some time after a rude rcu-tasks
1246 * grace period has elapsed, in other words after all currently
1247 * executing rcu-tasks read-side critical sections have completed. These
1248 * read-side critical sections are delimited by calls to schedule(),
1249 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1250 * context), and (in theory, anyway) cond_resched().
1251 *
1252 * This is a very specialized primitive, intended only for a few uses in
1253 * tracing and other situations requiring manipulation of function preambles
1254 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1255 * (yet) intended for heavy use from multiple CPUs.
1256 *
1257 * See the description of synchronize_rcu() for more detailed information
1258 * on memory ordering guarantees.
1259 */
1260 void synchronize_rcu_tasks_rude(void)
1261 {
1262 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1263 }
1264 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1265
1266 /**
1267 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1268 *
1269 * Although the current implementation is guaranteed to wait, it is not
1270 * obligated to, for example, if there are no pending callbacks.
1271 */
1272 void rcu_barrier_tasks_rude(void)
1273 {
1274 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1275 }
1276 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1277
1278 int rcu_tasks_rude_lazy_ms = -1;
1279 module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1280
1281 static int __init rcu_spawn_tasks_rude_kthread(void)
1282 {
1283 cblist_init_generic(&rcu_tasks_rude);
1284 rcu_tasks_rude.gp_sleep = HZ / 10;
1285 if (rcu_tasks_rude_lazy_ms >= 0)
1286 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1287 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1288 return 0;
1289 }
1290
1291 #if !defined(CONFIG_TINY_RCU)
1292 void show_rcu_tasks_rude_gp_kthread(void)
1293 {
1294 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1295 }
1296 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1297 #endif // !defined(CONFIG_TINY_RCU)
1298
1299 struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1300 {
1301 return rcu_tasks_rude.kthread_ptr;
1302 }
1303 EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1304
1305 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1306
1307 ////////////////////////////////////////////////////////////////////////
1308 //
1309 // Tracing variant of Tasks RCU. This variant is designed to be used
1310 // to protect tracing hooks, including those of BPF. This variant
1311 // therefore:
1312 //
1313 // 1. Has explicit read-side markers to allow finite grace periods
1314 // in the face of in-kernel loops for PREEMPT=n builds.
1315 //
1316 // 2. Protects code in the idle loop, exception entry/exit, and
1317 // CPU-hotplug code paths, similar to the capabilities of SRCU.
1318 //
1319 // 3. Avoids expensive read-side instructions, having overhead similar
1320 // to that of Preemptible RCU.
1321 //
1322 // There are of course downsides. For example, the grace-period code
1323 // can send IPIs to CPUs, even when those CPUs are in the idle loop or
1324 // in nohz_full userspace. If needed, these downsides can be at least
1325 // partially remedied.
1326 //
1327 // Perhaps most important, this variant of RCU does not affect the vanilla
1328 // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1329 // readers can operate from idle, offline, and exception entry/exit in no
1330 // way allows rcu_preempt and rcu_sched readers to also do so.
1331 //
1332 // The implementation uses rcu_tasks_wait_gp(), which relies on function
1333 // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1334 // function sets these function pointers up so that rcu_tasks_wait_gp()
1335 // invokes these functions in this order:
1336 //
1337 // rcu_tasks_trace_pregp_step():
1338 // Disables CPU hotplug, adds all currently executing tasks to the
1339 // holdout list, then checks the state of all tasks that blocked
1340 // or were preempted within their current RCU Tasks Trace read-side
1341 // critical section, adding them to the holdout list if appropriate.
1342 // Finally, this function re-enables CPU hotplug.
1343 // The ->pertask_func() pointer is NULL, so there is no per-task processing.
1344 // rcu_tasks_trace_postscan():
1345 // Invokes synchronize_rcu() to wait for late-stage exiting tasks
1346 // to finish exiting.
1347 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1348 // Scans the holdout list, attempting to identify a quiescent state
1349 // for each task on the list. If there is a quiescent state, the
1350 // corresponding task is removed from the holdout list. Once this
1351 // list is empty, the grace period has completed.
1352 // rcu_tasks_trace_postgp():
1353 // Provides the needed full memory barrier and does debug checks.
1354 //
1355 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1356 //
1357 // Pre-grace-period update-side code is ordered before the grace period
1358 // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1359 // read-side code is ordered before the grace period by atomic operations
1360 // on .b.need_qs flag of each task involved in this process, or by scheduler
1361 // context-switch ordering (for locked-down non-running readers).
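//
// Illustrative sketch only (my_hook and do_trace_thing() are hypothetical):
// a reader of a Tasks-Trace-protected pointer brackets its access with the
// explicit markers, which are cheap enough for tracing fast paths:
//
//	rcu_read_lock_trace();
//	hook = READ_ONCE(my_hook);
//	if (hook)
//		do_trace_thing(hook);
//	rcu_read_unlock_trace();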
1362
1363 // The lockdep state must be outside of #ifdef to be useful.
1364 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1365 static struct lock_class_key rcu_lock_trace_key;
1366 struct lockdep_map rcu_trace_lock_map =
1367 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1368 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1369 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1370
1371 #ifdef CONFIG_TASKS_TRACE_RCU
1372
1373 // Record outstanding IPIs to each CPU. No point in sending two...
1374 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1375
1376 // The number of detections of task quiescent state relying on
1377 // heavyweight readers executing explicit memory barriers.
1378 static unsigned long n_heavy_reader_attempts;
1379 static unsigned long n_heavy_reader_updates;
1380 static unsigned long n_heavy_reader_ofl_updates;
1381 static unsigned long n_trc_holdouts;
1382
1383 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1384 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1385 "RCU Tasks Trace");
1386
1387 /* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1388 static u8 rcu_ld_need_qs(struct task_struct *t)
1389 {
1390 smp_mb(); // Enforce full grace-period ordering.
1391 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1392 }
1393
1394 /* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1395 static void rcu_st_need_qs(struct task_struct *t, u8 v)
1396 {
1397 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1398 smp_mb(); // Enforce full grace-period ordering.
1399 }
1400
1401 /*
1402 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1403 * the four-byte operand-size restriction of some platforms.
1404 * Returns the old value, which is often ignored.
1405 */
1406 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1407 {
1408 union rcu_special ret;
1409 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1410 union rcu_special trs_new = trs_old;
1411
1412 if (trs_old.b.need_qs != old)
1413 return trs_old.b.need_qs;
1414 trs_new.b.need_qs = new;
1415 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1416 return ret.b.need_qs;
1417 }
1418 EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
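//
// A rough sketch of the union that makes the full-word exchange above
// possible (field order and widths are illustrative; see union rcu_special
// in include/linux/sched.h for the real definition):
//
//	union rcu_special {
//		struct {
//			u8 blocked;
//			u8 need_qs;
//			u8 exp_hint;
//			u8 need_mb;
//		} b;		// Byte-wise view.
//		u32 s;		// Full-word view used by the cmpxchg() above.
//	};
//
// Operating on the full word satisfies the four-byte operand-size
// restriction while still changing only the value of the need_qs byte.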
1419
1420 /*
1421 * If we are the last reader, signal the grace-period kthread.
1422 * Also remove from the per-CPU list of blocked tasks.
1423 */
1424 void rcu_read_unlock_trace_special(struct task_struct *t)
1425 {
1426 unsigned long flags;
1427 struct rcu_tasks_percpu *rtpcp;
1428 union rcu_special trs;
1429
1430 // Open-coded full-word version of rcu_ld_need_qs().
1431 smp_mb(); // Enforce full grace-period ordering.
1432 trs = smp_load_acquire(&t->trc_reader_special);
1433
1434 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1435 smp_mb(); // Pairs with update-side barriers.
1436 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1437 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1438 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1439 TRC_NEED_QS_CHECKED);
1440
1441 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1442 }
1443 if (trs.b.blocked) {
1444 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1445 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1446 list_del_init(&t->trc_blkd_node);
1447 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1448 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1449 }
1450 WRITE_ONCE(t->trc_reader_nesting, 0);
1451 }
1452 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
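//
// This slow path is reached only from the outermost rcu_read_unlock_trace()
// and only when ->trc_reader_special is nonzero.  A simplified sketch of
// that caller (the real inline lives in include/linux/rcupdate_trace.h and
// also passes ->trc_reader_nesting through a negative transient value so
// that IPI handlers can detect an unlock in progress):
//
//	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
//	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting)
//		WRITE_ONCE(t->trc_reader_nesting, nesting);	// Fast path.
//	else
//		rcu_read_unlock_trace_special(t);		// Slow path.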
1453
1454 /* Add a newly blocked reader task to its CPU's list. */
1455 void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1456 {
1457 unsigned long flags;
1458 struct rcu_tasks_percpu *rtpcp;
1459
1460 local_irq_save(flags);
1461 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1462 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1463 t->trc_blkd_cpu = smp_processor_id();
1464 if (!rtpcp->rtp_blkd_tasks.next)
1465 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1466 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1467 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1468 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1469 }
1470 EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1471
1472 /* Add a task to the holdout list, if it is not already on the list. */
1473 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1474 {
1475 if (list_empty(&t->trc_holdout_list)) {
1476 get_task_struct(t);
1477 list_add(&t->trc_holdout_list, bhp);
1478 n_trc_holdouts++;
1479 }
1480 }
1481
1482 /* Remove a task from the holdout list, if it is in fact present. */
1483 static void trc_del_holdout(struct task_struct *t)
1484 {
1485 if (!list_empty(&t->trc_holdout_list)) {
1486 list_del_init(&t->trc_holdout_list);
1487 put_task_struct(t);
1488 n_trc_holdouts--;
1489 }
1490 }
1491
1492 /* IPI handler to check task state. */
1493 static void trc_read_check_handler(void *t_in)
1494 {
1495 int nesting;
1496 struct task_struct *t = current;
1497 struct task_struct *texp = t_in;
1498
1499 // If the task is no longer running on this CPU, leave.
1500 if (unlikely(texp != t))
1501 goto reset_ipi; // Already on holdout list, so will check later.
1502
1503 // If the task is not in a read-side critical section, and
1504 // if this is the last reader, awaken the grace-period kthread.
1505 nesting = READ_ONCE(t->trc_reader_nesting);
1506 if (likely(!nesting)) {
1507 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1508 goto reset_ipi;
1509 }
1510 // If we are racing with an rcu_read_unlock_trace(), try again later.
1511 if (unlikely(nesting < 0))
1512 goto reset_ipi;
1513
1514 // Get here if the task is in a read-side critical section.
1515 // Set its state so that it will update state for the grace-period
1516 // kthread upon exit from that critical section.
1517 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1518
1519 reset_ipi:
1520 // Allow future IPIs to be sent on CPU and for task.
1521 // Also order this IPI handler against any later manipulations of
1522 // the intended task.
1523 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1524 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1525 }
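//
// For orientation, the ->trc_reader_special.b.need_qs byte moves through
// roughly these states as a grace period examines a task (a summary of the
// transitions used above and in trc_inspect_reader() below):
//
//	0					Not yet examined this grace period.
//	TRC_NEED_QS_CHECKED			Examined and found quiescent.
//	TRC_NEED_QS | TRC_NEED_QS_CHECKED	In a read-side critical section;
//						must report a quiescent state via
//						rcu_read_unlock_trace_special()
//						when that section ends.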
1526
1527 /* Callback function for scheduler to check locked-down task. */
1528 static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1529 {
1530 struct list_head *bhp = bhp_in;
1531 int cpu = task_cpu(t);
1532 int nesting;
1533 bool ofl = cpu_is_offline(cpu);
1534
1535 if (task_curr(t) && !ofl) {
1536 // If no chance of heavyweight readers, do it the hard way.
1537 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1538 return -EINVAL;
1539
1540 // If heavyweight readers are enabled on the remote task,
1541 // we can inspect its state even though it is currently running.
1542 // However, we cannot safely change its state.
1543 n_heavy_reader_attempts++;
1544 // Check for "running" idle tasks on offline CPUs.
1545 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1546 return -EINVAL; // No quiescent state, do it the hard way.
1547 n_heavy_reader_updates++;
1548 nesting = 0;
1549 } else {
1550 // The task is not running, so C-language access is safe.
1551 nesting = t->trc_reader_nesting;
1552 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1553 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1554 n_heavy_reader_ofl_updates++;
1555 }
1556
1557 // If not exiting a read-side critical section, mark as checked
1558 // so that the grace-period kthread will remove it from the
1559 // holdout list.
1560 if (!nesting) {
1561 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1562 return 0; // In QS, so done.
1563 }
1564 if (nesting < 0)
1565 return -EINVAL; // Reader transitioning, try again later.
1566
1567 // The task is in a read-side critical section, so set up its
1568 // state so that it will update state upon exit from that critical
1569 // section.
1570 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1571 trc_add_holdout(t, bhp);
1572 return 0;
1573 }
1574
1575 /* Attempt to extract the state for the specified task. */
1576 static void trc_wait_for_one_reader(struct task_struct *t,
1577 struct list_head *bhp)
1578 {
1579 int cpu;
1580
1581 // If a previous IPI is still in flight, let it complete.
1582 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1583 return;
1584
1585 // The current task had better be in a quiescent state.
1586 if (t == current) {
1587 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1588 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1589 return;
1590 }
1591
1592 // Attempt to nail down the task for inspection.
1593 get_task_struct(t);
1594 if (!task_call_func(t, trc_inspect_reader, bhp)) {
1595 put_task_struct(t);
1596 return;
1597 }
1598 put_task_struct(t);
1599
1600 // If this task is not yet on the holdout list, then we are in
1601 // an RCU read-side critical section. Otherwise, the invocation of
1602 // trc_add_holdout() that added it to the list did the necessary
1603 // get_task_struct(). Either way, the task cannot be freed out
1604 // from under this code.
1605
1606 // If the task is currently running, send an IPI; either way, add it to the list.
1607 trc_add_holdout(t, bhp);
1608 if (task_curr(t) &&
1609 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1610 // The task is currently running, so try IPIing it.
1611 cpu = task_cpu(t);
1612
1613 // If there is already an IPI outstanding, let it happen.
1614 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1615 return;
1616
1617 per_cpu(trc_ipi_to_cpu, cpu) = true;
1618 t->trc_ipi_to_cpu = cpu;
1619 rcu_tasks_trace.n_ipis++;
1620 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1621 // Just in case there is some reason for
1622 // failure other than the target CPU being offline.
1623 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1624 __func__, cpu);
1625 rcu_tasks_trace.n_ipis_fails++;
1626 per_cpu(trc_ipi_to_cpu, cpu) = false;
1627 t->trc_ipi_to_cpu = -1;
1628 }
1629 }
1630 }
1631
1632 /*
1633 * Initialize for first-round processing for the specified task.
1634 * Return false if task is NULL or already taken care of, true otherwise.
1635 */
1636 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1637 {
1638 // During early boot when there is only the one boot CPU, there
1639 // is no idle task for the other CPUs. Also, the grace-period
1640 // kthread is always in a quiescent state. In addition, just return
1641 // if this task is already on the list.
1642 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1643 return false;
1644
1645 rcu_st_need_qs(t, 0);
1646 t->trc_ipi_to_cpu = -1;
1647 return true;
1648 }
1649
1650 /* Do first-round processing for the specified task. */
1651 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1652 {
1653 if (rcu_tasks_trace_pertask_prep(t, true))
1654 trc_wait_for_one_reader(t, hop);
1655 }
1656
1657 /* Initialize for a new RCU-tasks-trace grace period. */
1658 static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1659 {
1660 LIST_HEAD(blkd_tasks);
1661 int cpu;
1662 unsigned long flags;
1663 struct rcu_tasks_percpu *rtpcp;
1664 struct task_struct *t;
1665
1666 // There shouldn't be any old IPIs, but...
1667 for_each_possible_cpu(cpu)
1668 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1669
1670 // Disable CPU hotplug across the CPU scan for the benefit of
1671 // any IPIs that might be needed. This also waits for all readers
1672 // in CPU-hotplug code paths.
1673 cpus_read_lock();
1674
1675 // These rcu_tasks_trace_pertask_prep() calls are serialized to
1676 // allow safe access to the hop list.
1677 for_each_online_cpu(cpu) {
1678 rcu_read_lock();
1679 // Note that cpu_curr_snapshot() picks up the target
1680 // CPU's current task while its runqueue is locked with
1681 // an smp_mb__after_spinlock(). This ensures that either
1682 // the grace-period kthread will see that task's read-side
1683 // critical section or the task will see the updater's pre-GP
1684 // accesses. The trailing smp_mb() in cpu_curr_snapshot()
1685 // does not currently play a role other than simplify
1686 // that function's ordering semantics. If these simplified
1687 // ordering semantics continue to be redundant, that smp_mb()
1688 // might be removed.
1689 t = cpu_curr_snapshot(cpu);
1690 if (rcu_tasks_trace_pertask_prep(t, true))
1691 trc_add_holdout(t, hop);
1692 rcu_read_unlock();
1693 cond_resched_tasks_rcu_qs();
1694 }
1695
1696 // Only after all running tasks have been accounted for is it
1697 // safe to take care of the tasks that have blocked within their
1698 // current RCU tasks trace read-side critical section.
1699 for_each_possible_cpu(cpu) {
1700 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1701 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1702 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1703 while (!list_empty(&blkd_tasks)) {
1704 rcu_read_lock();
1705 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1706 list_del_init(&t->trc_blkd_node);
1707 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1708 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1709 rcu_tasks_trace_pertask(t, hop);
1710 rcu_read_unlock();
1711 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1712 }
1713 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1714 cond_resched_tasks_rcu_qs();
1715 }
1716
1717 // Re-enable CPU hotplug now that the holdout list is populated.
1718 cpus_read_unlock();
1719 }
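//
// The cpu_curr_snapshot() ordering used above can be pictured as a
// two-thread sketch (illustrative only; gp stands in for any
// updater-published data):
//
//	Grace-period kthread			Sampled task
//	--------------------			------------
//	update *gp (pre-GP accesses)		rcu_read_lock_trace();
//	t = cpu_curr_snapshot(cpu);		p = READ_ONCE(gp);
//	  (rq lock + smp_mb__after_spinlock())	use *p;
//	inspect t's reader state		rcu_read_unlock_trace();
//
// Either the snapshot observes the task inside its critical section, in
// which case the task lands on the holdout list and is waited for, or the
// task's reads are ordered after the updater's pre-grace-period accesses.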
1720
1721 /*
1722 * Do intermediate processing between task and holdout scans.
1723 */
1724 static void rcu_tasks_trace_postscan(struct list_head *hop)
1725 {
1726 // Wait for late-stage exiting tasks to finish exiting.
1727 // These might have passed the call to exit_tasks_rcu_finish().
1728
1729 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1730 synchronize_rcu();
1731 // Any tasks that exit after this point will set
1732 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1733 }
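//
// Because of that synchronize_rcu(), an RCU Tasks Trace grace period also
// implies a regular RCU grace period, which rcu_trace_implies_rcu_gp()
// advertises.  A hedged sketch of how a caller can exploit this to fold a
// call_rcu() stage into a call_rcu_tasks_trace() callback (structure and
// helper names are illustrative; BPF memory allocation uses a similar trick):
//
//	static void my_free_rcu(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_obj, rh));
//	}
//
//	static void my_free_tasks_trace(struct rcu_head *rhp)
//	{
//		if (rcu_trace_implies_rcu_gp())
//			my_free_rcu(rhp);		// RCU GP already implied.
//		else
//			call_rcu(rhp, my_free_rcu);	// Still need an RCU GP.
//	}
//
//	call_rcu_tasks_trace(&p->rh, my_free_tasks_trace);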
1734
1735 /* Communicate task state back to the RCU tasks trace stall-warning report. */
1736 struct trc_stall_chk_rdr {
1737 int nesting;
1738 int ipi_to_cpu;
1739 u8 needqs;
1740 };
1741
1742 static int trc_check_slow_task(struct task_struct *t, void *arg)
1743 {
1744 struct trc_stall_chk_rdr *trc_rdrp = arg;
1745
1746 if (task_curr(t) && cpu_online(task_cpu(t)))
1747 return false; // It is running, so decline to inspect it.
1748 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1749 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1750 trc_rdrp->needqs = rcu_ld_need_qs(t);
1751 return true;
1752 }
1753
1754 /* Show the state of a task stalling the current RCU tasks trace GP. */
1755 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1756 {
1757 int cpu;
1758 struct trc_stall_chk_rdr trc_rdr;
1759 bool is_idle_tsk = is_idle_task(t);
1760
1761 if (*firstreport) {
1762 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1763 *firstreport = false;
1764 }
1765 cpu = task_cpu(t);
1766 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1767 pr_alert("P%d: %c%c\n",
1768 t->pid,
1769 ".I"[t->trc_ipi_to_cpu >= 0],
1770 ".i"[is_idle_tsk]);
1771 else
1772 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1773 t->pid,
1774 ".I"[trc_rdr.ipi_to_cpu >= 0],
1775 ".i"[is_idle_tsk],
1776 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1777 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1778 trc_rdr.nesting,
1779 " !CN"[trc_rdr.needqs & 0x3],
1780 " ?"[trc_rdr.needqs > 0x3],
1781 cpu, cpu_online(cpu) ? "" : "(offline)");
1782 sched_show_task(t);
1783 }
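//
// Decoding the per-task stall line printed above (each flag position
// prints as '.' when the condition does not hold):
//
//	I	An IPI to this task is still outstanding.
//	i	The task is an idle task.
//	N	The task's CPU is a nohz_full CPU.
//	B	The task blocked within its current read-side critical section.
//	nesting	Current ->trc_reader_nesting value (positive means in a reader).
//	The character after the nesting count encodes the low two bits of
//	.need_qs (the TRC_NEED_QS and TRC_NEED_QS_CHECKED flags): ' ' means
//	neither is set, 'N' means both are set, and a trailing '?' flags
//	unexpected high-order bits.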
1784
1785 /* List stalled IPIs for RCU tasks trace. */
1786 static void show_stalled_ipi_trace(void)
1787 {
1788 int cpu;
1789
1790 for_each_possible_cpu(cpu)
1791 if (per_cpu(trc_ipi_to_cpu, cpu))
1792 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1793 }
1794
1795 /* Do one scan of the holdout list. */
1796 static void check_all_holdout_tasks_trace(struct list_head *hop,
1797 bool needreport, bool *firstreport)
1798 {
1799 struct task_struct *g, *t;
1800
1801 // Disable CPU hotplug across the holdout list scan for IPIs.
1802 cpus_read_lock();
1803
1804 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1805 // If safe and needed, try to check the current task.
1806 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1807 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1808 trc_wait_for_one_reader(t, hop);
1809
1810 // If check succeeded, remove this task from the list.
1811 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1812 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1813 trc_del_holdout(t);
1814 else if (needreport)
1815 show_stalled_task_trace(t, firstreport);
1816 cond_resched_tasks_rcu_qs();
1817 }
1818
1819 // Re-enable CPU hotplug now that the holdout list scan has completed.
1820 cpus_read_unlock();
1821
1822 if (needreport) {
1823 if (*firstreport)
1824 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1825 show_stalled_ipi_trace();
1826 }
1827 }
1828
1829 static void rcu_tasks_trace_empty_fn(void *unused)
1830 {
1831 }
1832
1833 /* Wait for grace period to complete and provide ordering. */
1834 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1835 {
1836 int cpu;
1837
1838 // Wait for any lingering IPI handlers to complete. Note that
1839 // if a CPU has gone offline or transitioned to userspace in the
1840 // meantime, all IPI handlers should have been drained beforehand.
1841 // Yes, this assumes that CPUs process IPIs in order. If that ever
1842 // changes, there will need to be a recheck and/or timed wait.
1843 for_each_online_cpu(cpu)
1844 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1845 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1846
1847 smp_mb(); // Caller's code must be ordered after wakeup.
1848 // Pairs with pretty much every ordering primitive.
1849 }
1850
1851 /* Report any needed quiescent state for this exiting task. */
1852 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1853 {
1854 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1855
1856 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1857 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1858 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1859 rcu_read_unlock_trace_special(t);
1860 else
1861 WRITE_ONCE(t->trc_reader_nesting, 0);
1862 }
1863
1864 /**
1865 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1866 * @rhp: structure to be used for queueing the RCU updates.
1867 * @func: actual callback function to be invoked after the grace period
1868 *
1869 * The callback function will be invoked some time after a trace rcu-tasks
1870 * grace period elapses, in other words after all currently executing
1871 * trace rcu-tasks read-side critical sections have completed. These
1872 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1873 * and rcu_read_unlock_trace().
1874 *
1875 * See the description of call_rcu() for more detailed information on
1876 * memory ordering guarantees.
1877 */
1878 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1879 {
1880 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1881 }
1882 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
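//
// Typical usage (a sketch with illustrative names) embeds an rcu_head in
// the protected structure and frees it from the callback:
//
//	struct my_obj {
//		struct list_head list;
//		struct rcu_head rh;
//	};
//
//	static void my_obj_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct my_obj, rh));
//	}
//
//	// After making the object unreachable to new readers:
//	call_rcu_tasks_trace(&obj->rh, my_obj_free_cb);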
1883
1884 /**
1885 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1886 *
1887 * Control will return to the caller some time after a trace rcu-tasks
1888 * grace period has elapsed, in other words after all currently executing
1889 * trace rcu-tasks read-side critical sections have completed. These read-side
1890 * critical sections are delimited by calls to rcu_read_lock_trace()
1891 * and rcu_read_unlock_trace().
1892 *
1893 * This is a very specialized primitive, intended only for a few uses in
1894 * tracing and other situations requiring manipulation of function preambles
1895 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1896 * (yet) intended for heavy use from multiple CPUs.
1897 *
1898 * See the description of synchronize_rcu() for more detailed information
1899 * on memory ordering guarantees.
1900 */
1901 void synchronize_rcu_tasks_trace(void)
1902 {
1903 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1904 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1905 }
1906 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
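//
// A common pattern (sketch; hook names are illustrative) unhooks a
// trampoline or probe and then waits out all tracing readers before
// freeing it:
//
//	unregister_my_hook(hook);	// New readers can no longer find it.
//	synchronize_rcu_tasks_trace();	// Wait for pre-existing readers.
//	free_my_hook(hook);		// Now safe to free.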
1907
1908 /**
1909 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1910 *
1911 * Although the current implementation is guaranteed to wait, it is not
1912 * obligated to do so, for example, if there are no pending callbacks.
1913 */
1914 void rcu_barrier_tasks_trace(void)
1915 {
1916 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1917 }
1918 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
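//
// For example (sketch), code that queued callbacks with
// call_rcu_tasks_trace() can invoke rcu_barrier_tasks_trace() on its
// teardown path so that no callback runs after its text or data is gone:
//
//	static void my_subsystem_teardown(void)
//	{
//		remove_all_objects();		// Queues call_rcu_tasks_trace() callbacks.
//		rcu_barrier_tasks_trace();	// Wait for all of them to be invoked.
//	}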
1919
1920 int rcu_tasks_trace_lazy_ms = -1;
1921 module_param(rcu_tasks_trace_lazy_ms, int, 0444);
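// Note: because this file is built into the kernel via kernel/rcu/update.c,
// the parameter is presumably set on the boot command line as
// rcupdate.rcu_tasks_trace_lazy_ms=<ms>.  The default of -1 leaves
// ->lazy_jiffies at its generic setting (see rcu_spawn_tasks_trace_kthread()
// below).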
1922
1923 static int __init rcu_spawn_tasks_trace_kthread(void)
1924 {
1925 cblist_init_generic(&rcu_tasks_trace);
1926 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1927 rcu_tasks_trace.gp_sleep = HZ / 10;
1928 rcu_tasks_trace.init_fract = HZ / 10;
1929 } else {
1930 rcu_tasks_trace.gp_sleep = HZ / 200;
1931 if (rcu_tasks_trace.gp_sleep <= 0)
1932 rcu_tasks_trace.gp_sleep = 1;
1933 rcu_tasks_trace.init_fract = HZ / 200;
1934 if (rcu_tasks_trace.init_fract <= 0)
1935 rcu_tasks_trace.init_fract = 1;
1936 }
1937 if (rcu_tasks_trace_lazy_ms >= 0)
1938 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1939 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1940 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1941 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1942 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1943 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1944 return 0;
1945 }
1946
1947 #if !defined(CONFIG_TINY_RCU)
1948 void show_rcu_tasks_trace_gp_kthread(void)
1949 {
1950 char buf[64];
1951
1952 snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
1953 data_race(n_trc_holdouts),
1954 data_race(n_heavy_reader_ofl_updates),
1955 data_race(n_heavy_reader_updates),
1956 data_race(n_heavy_reader_attempts));
1957 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1958 }
1959 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1960 #endif // !defined(CONFIG_TINY_RCU)
1961
1962 struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
1963 {
1964 return rcu_tasks_trace.kthread_ptr;
1965 }
1966 EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
1967
1968 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1969 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1970 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1971
1972 #ifndef CONFIG_TINY_RCU
1973 void show_rcu_tasks_gp_kthreads(void)
1974 {
1975 show_rcu_tasks_classic_gp_kthread();
1976 show_rcu_tasks_rude_gp_kthread();
1977 show_rcu_tasks_trace_gp_kthread();
1978 }
1979 #endif /* #ifndef CONFIG_TINY_RCU */
1980
1981 #ifdef CONFIG_PROVE_RCU
1982 struct rcu_tasks_test_desc {
1983 struct rcu_head rh;
1984 const char *name;
1985 bool notrun;
1986 unsigned long runstart;
1987 };
1988
1989 static struct rcu_tasks_test_desc tests[] = {
1990 {
1991 .name = "call_rcu_tasks()",
1992 /* If not defined, the test is skipped. */
1993 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
1994 },
1995 {
1996 .name = "call_rcu_tasks_rude()",
1997 /* If not defined, the test is skipped. */
1998 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1999 },
2000 {
2001 .name = "call_rcu_tasks_trace()",
2002 /* If not defined, the test is skipped. */
2003 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
2004 }
2005 };
2006
2007 static void test_rcu_tasks_callback(struct rcu_head *rhp)
2008 {
2009 struct rcu_tasks_test_desc *rttd =
2010 container_of(rhp, struct rcu_tasks_test_desc, rh);
2011
2012 pr_info("Callback from %s invoked.\n", rttd->name);
2013
2014 rttd->notrun = false;
2015 }
2016
2017 static void rcu_tasks_initiate_self_tests(void)
2018 {
2019 pr_info("Running RCU-tasks wait API self tests\n");
2020 #ifdef CONFIG_TASKS_RCU
2021 tests[0].runstart = jiffies;
2022 synchronize_rcu_tasks();
2023 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2024 #endif
2025
2026 #ifdef CONFIG_TASKS_RUDE_RCU
2027 tests[1].runstart = jiffies;
2028 synchronize_rcu_tasks_rude();
2029 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2030 #endif
2031
2032 #ifdef CONFIG_TASKS_TRACE_RCU
2033 tests[2].runstart = jiffies;
2034 synchronize_rcu_tasks_trace();
2035 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2036 #endif
2037 }
2038
2039 /*
2040 * Return: 0 - test passed
2041 * 1 - test failed, but have not timed out yet
2042 * -1 - test failed and timed out
2043 */
2044 static int rcu_tasks_verify_self_tests(void)
2045 {
2046 int ret = 0;
2047 int i;
2048 unsigned long bst = rcu_task_stall_timeout;
2049
2050 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2051 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2052 for (i = 0; i < ARRAY_SIZE(tests); i++) {
2053 while (tests[i].notrun) { // still hanging.
2054 if (time_after(jiffies, tests[i].runstart + bst)) {
2055 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2056 ret = -1;
2057 break;
2058 }
2059 ret = 1;
2060 break;
2061 }
2062 }
2063 WARN_ON(ret < 0);
2064
2065 return ret;
2066 }
2067
2068 /*
2069 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2070 * test passes or has timed out.
2071 */
2072 static struct delayed_work rcu_tasks_verify_work;
2073 static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2074 {
2075 int ret = rcu_tasks_verify_self_tests();
2076
2077 if (ret <= 0)
2078 return;
2079
2080 /* Test is still failing but has not yet timed out, so reschedule another check. */
2081 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2082 }
2083
2084 static int rcu_tasks_verify_schedule_work(void)
2085 {
2086 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2087 rcu_tasks_verify_work_fn(NULL);
2088 return 0;
2089 }
2090 late_initcall(rcu_tasks_verify_schedule_work);
2091 #else /* #ifdef CONFIG_PROVE_RCU */
2092 static void rcu_tasks_initiate_self_tests(void) { }
2093 #endif /* #else #ifdef CONFIG_PROVE_RCU */
2094
2095 void __init rcu_init_tasks_generic(void)
2096 {
2097 #ifdef CONFIG_TASKS_RCU
2098 rcu_spawn_tasks_kthread();
2099 #endif
2100
2101 #ifdef CONFIG_TASKS_RUDE_RCU
2102 rcu_spawn_tasks_rude_kthread();
2103 #endif
2104
2105 #ifdef CONFIG_TASKS_TRACE_RCU
2106 rcu_spawn_tasks_trace_kthread();
2107 #endif
2108
2109 // Run the self-tests.
2110 rcu_tasks_initiate_self_tests();
2111 }
2112
2113 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2114 static inline void rcu_tasks_bootup_oddness(void) {}
2115 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
2116