/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	int cpu;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_gbl_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),			\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.name = n,									\
	.percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS),					\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	int lim;

	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;
	WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
	}
	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	wake_up(&rtp->cbs_wq);
}

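/*
 * Editor's illustrative note (not part of the original file): a worked
 * example of the CPU-to-queue mapping that cblist_init_generic() sets up,
 * using assumed CPU counts.  The enqueue path below indexes the per-CPU
 * queues with smp_processor_id() >> percpu_enqueue_shift.
 *
 *	nr_cpu_ids == 64, rcu_task_enqueue_lim defaulted (-1), so lim == 1:
 *		percpu_enqueue_shift = ilog2(64 / 1) = 6, every CPU's ID
 *		shifts down to 0, and all callbacks land on queue 0.
 *
 *	nr_cpu_ids == 64, rcu_task_enqueue_lim == 4:
 *		percpu_enqueue_shift = ilog2(64 / 4) = 4, so CPUs 0-15 map
 *		to queue index 0, CPUs 16-31 to index 1, CPUs 32-47 to
 *		index 2, and CPUs 48-63 to index 3.
 */
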
// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	rtpcp = per_cpu_ptr(rtp->rtpcpu,
			    smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift));
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
		cblist_init_generic(rtp);
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
	}
	needwake = rcu_segcblist_empty(&rtpcp->cblist);
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
			needgpcb |= 0x3;
		if (!rcu_segcblist_empty(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz &&
	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpu;
	int cpunext;
	unsigned long flags;
	int len;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	cpu = rtpcp->cpu;
	cpunext = cpu * 2 + 1;
	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		cpunext++;
		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int needgpcb;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* If there were none, wait a bit and start over. */
		wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp)));

		if (needgpcb & 0x2) {
			// Wait for one grace period.
			set_tasks_gp_state(rtp, RTGS_WAIT_GP);
			rtp->gp_start = jiffies;
			rcu_seq_start(&rtp->tasks_gp_seq);
			rtp->gp_func(rtp);
			rcu_seq_end(&rtp->tasks_gp_seq);
		}

		/* Invoke callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));

		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, 0); // for_each...

	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!data_race(rcu_segcblist_empty(&rtpcp->cblist))],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

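/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * call_rcu_tasks() usage pattern.  The names my_tramp_data,
 * my_tramp_reclaim(), and free_my_trampoline() are hypothetical.
 *
 *	struct my_tramp_data {
 *		struct rcu_head rh;
 *		void *tramp;
 *	};
 *
 *	static void my_tramp_reclaim(struct rcu_head *rhp)
 *	{
 *		struct my_tramp_data *p = container_of(rhp, struct my_tramp_data, rh);
 *
 *		free_my_trampoline(p->tramp);	// Hypothetical helper.
 *		kfree(p);
 *	}
 *
 *	// After unlinking the trampoline so that no new task can enter it:
 *	call_rcu_tasks(&p->rh, my_tramp_reclaim);
 */
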
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	cblist_init_generic(&rcu_tasks);
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
 * context), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

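/*
 * Editor's illustrative sketch (not part of the original file): the rude
 * variant is typically used synchronously.  The hook names below are
 * hypothetical.
 *
 *	unregister_my_preempt_off_hook();	// No new preempt-disabled callers.
 *	synchronize_rcu_tasks_rude();		// Every CPU has context-switched.
 *	free_my_preempt_off_hook_text();	// Old code can no longer be running.
 */
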
/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	cblist_init_generic(&rcu_tasks_rude);
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static int trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return nesting ? -EINVAL : 0;  // If in QS, done, otherwise try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c\n",
			 t->pid,
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 trc_rdr.nesting,
			 " N"[!!trc_rdr.needqs],
			 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

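/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * reader/updater pairing for the tracing variant.  The names my_hook,
 * my_hook_lock, and my_hook_free() are hypothetical; the read-side markers
 * are declared in include/linux/rcupdate_trace.h.
 *
 *	// Reader, e.g. on a tracing or BPF hook invocation path:
 *	rcu_read_lock_trace();
 *	hook = rcu_dereference_check(my_hook, rcu_read_lock_trace_held());
 *	if (hook)
 *		hook->func(hook->arg);
 *	rcu_read_unlock_trace();
 *
 *	// Updater removing the hook:
 *	old = rcu_replace_pointer(my_hook, NULL, lockdep_is_held(&my_hook_lock));
 *	if (old)
 *		call_rcu_tasks_trace(&old->rh, my_hook_free);
 */
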
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to do so, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
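/*
 * Illustrative sketch only: a hypothetical teardown path for the "foo"
 * example sketched above.  synchronize_rcu_tasks_trace() waits for all
 * pre-existing trace readers, after which the final version can be freed
 * directly.  rcu_barrier_tasks_trace() then waits for any callbacks already
 * queued via call_rcu_tasks_trace() to be invoked, which must happen before
 * the memory (or module) containing those callbacks goes away.  These names
 * are made up for illustration.
 */
#if 0	/* Example code, not built. */
static void foo_shutdown(void)
{
	struct foo_data *oldp;

	/* Unpublish the pointer so that new readers see NULL. */
	mutex_lock(&foo_update_mutex);
	oldp = rcu_replace_pointer(foo_ptr, NULL,
				   lockdep_is_held(&foo_update_mutex));
	mutex_unlock(&foo_update_mutex);

	/* Wait for readers that might still reference the old version. */
	synchronize_rcu_tasks_trace();
	kfree(oldp);

	/* Wait for previously queued foo_free_cb() invocations to finish. */
	rcu_barrier_tasks_trace();
}
#endif	/* Example code. */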
static int __init rcu_spawn_tasks_trace_kthread(void)
{
	cblist_init_generic(&rcu_tasks_trace);
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = true;
}

static void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) { // Callback has not yet been invoked.
			pr_err("%s has failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */