1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 #include <linux/rcupdate_trace.h> 49 #include <linux/nmi.h> 50 51 #include "rcu.h" 52 53 MODULE_LICENSE("GPL"); 54 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 55 56 /* Bits for ->extendables field, extendables param, and related definitions. */ 57 #define RCUTORTURE_RDR_SHIFT_1 8 /* Put SRCU index in upper bits. */ 58 #define RCUTORTURE_RDR_MASK_1 (1 << RCUTORTURE_RDR_SHIFT_1) 59 #define RCUTORTURE_RDR_SHIFT_2 9 /* Put SRCU index in upper bits. */ 60 #define RCUTORTURE_RDR_MASK_2 (1 << RCUTORTURE_RDR_SHIFT_2) 61 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 62 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 63 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 64 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 65 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 66 #define RCUTORTURE_RDR_RCU_1 0x20 /* ... entering another RCU reader. */ 67 #define RCUTORTURE_RDR_RCU_2 0x40 /* ... entering another RCU reader. */ 68 #define RCUTORTURE_RDR_NBITS 7 /* Number of bits defined above. */ 69 #define RCUTORTURE_MAX_EXTEND \ 70 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 71 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 72 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 73 /* Must be power of two minus one. 
*/ 74 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 75 76 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 77 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 78 torture_param(int, fqs_duration, 0, 79 "Duration of fqs bursts (us), 0 to disable"); 80 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 81 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 82 torture_param(int, fwd_progress, 1, "Test grace-period forward progress"); 83 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 84 torture_param(int, fwd_progress_holdoff, 60, 85 "Time between forward-progress tests (s)"); 86 torture_param(bool, fwd_progress_need_resched, 1, 87 "Hide cond_resched() behind need_resched()"); 88 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 89 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 90 torture_param(bool, gp_normal, false, 91 "Use normal (non-expedited) GP wait primitives"); 92 torture_param(bool, gp_poll, false, "Use polling GP wait primitives"); 93 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 94 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 95 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers"); 96 torture_param(int, n_barrier_cbs, 0, 97 "# of callbacks/kthreads for barrier testing"); 98 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 99 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 100 torture_param(int, object_debug, 0, 101 "Enable debug-object double call_rcu() testing"); 102 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 103 torture_param(int, onoff_interval, 0, 104 "Time between CPU hotplugs (jiffies), 0=disable"); 105 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable"); 106 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)"); 107 torture_param(int, read_exit_delay, 13, 108 "Delay between read-then-exit episodes (s)"); 109 torture_param(int, read_exit_burst, 16, 110 "# of read-then-exit bursts per episode, zero to disable"); 111 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 112 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 113 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 114 torture_param(int, stall_cpu_holdoff, 10, 115 "Time to wait before starting stall (s)."); 116 torture_param(bool, stall_no_softlockup, false, 117 "Avoid softlockup warning during cpu stall."); 118 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 119 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 120 torture_param(int, stall_gp_kthread, 0, 121 "Grace-period kthread stall duration (s)."); 122 torture_param(int, stat_interval, 60, 123 "Number of seconds between stats printk()s"); 124 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 125 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 126 torture_param(int, test_boost_duration, 4, 127 "Duration of each boost test, seconds."); 128 torture_param(int, test_boost_interval, 7, 129 "Interval between boost tests, seconds."); 130 torture_param(bool, test_no_idle_hz, true, 131 "Test support for tickless idle CPUs"); 132 torture_param(int, verbose, 1, 133 "Enable verbose debugging printk()s"); 134 135 static 
char *torture_type = "rcu"; 136 module_param(torture_type, charp, 0444); 137 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 138 139 static int nrealnocbers; 140 static int nrealreaders; 141 static struct task_struct *writer_task; 142 static struct task_struct **fakewriter_tasks; 143 static struct task_struct **reader_tasks; 144 static struct task_struct **nocb_tasks; 145 static struct task_struct *stats_task; 146 static struct task_struct *fqs_task; 147 static struct task_struct *boost_tasks[NR_CPUS]; 148 static struct task_struct *stall_task; 149 static struct task_struct **fwd_prog_tasks; 150 static struct task_struct **barrier_cbs_tasks; 151 static struct task_struct *barrier_task; 152 static struct task_struct *read_exit_task; 153 154 #define RCU_TORTURE_PIPE_LEN 10 155 156 // Mailbox-like structure to check RCU global memory ordering. 157 struct rcu_torture_reader_check { 158 unsigned long rtc_myloops; 159 int rtc_chkrdr; 160 unsigned long rtc_chkloops; 161 int rtc_ready; 162 struct rcu_torture_reader_check *rtc_assigner; 163 } ____cacheline_internodealigned_in_smp; 164 165 // Update-side data structure used to check RCU readers. 166 struct rcu_torture { 167 struct rcu_head rtort_rcu; 168 int rtort_pipe_count; 169 struct list_head rtort_free; 170 int rtort_mbtest; 171 struct rcu_torture_reader_check *rtort_chkp; 172 }; 173 174 static LIST_HEAD(rcu_torture_freelist); 175 static struct rcu_torture __rcu *rcu_torture_current; 176 static unsigned long rcu_torture_current_version; 177 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 178 static DEFINE_SPINLOCK(rcu_torture_lock); 179 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 180 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 181 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 182 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 183 static atomic_t n_rcu_torture_alloc; 184 static atomic_t n_rcu_torture_alloc_fail; 185 static atomic_t n_rcu_torture_free; 186 static atomic_t n_rcu_torture_mberror; 187 static atomic_t n_rcu_torture_mbchk_fail; 188 static atomic_t n_rcu_torture_mbchk_tries; 189 static atomic_t n_rcu_torture_error; 190 static long n_rcu_torture_barrier_error; 191 static long n_rcu_torture_boost_ktrerror; 192 static long n_rcu_torture_boost_rterror; 193 static long n_rcu_torture_boost_failure; 194 static long n_rcu_torture_boosts; 195 static atomic_long_t n_rcu_torture_timers; 196 static long n_barrier_attempts; 197 static long n_barrier_successes; /* did rcu_barrier test succeed? 
*/ 198 static unsigned long n_read_exits; 199 static struct list_head rcu_torture_removed; 200 static unsigned long shutdown_jiffies; 201 static unsigned long start_gp_seq; 202 static atomic_long_t n_nocb_offload; 203 static atomic_long_t n_nocb_deoffload; 204 205 static int rcu_torture_writer_state; 206 #define RTWS_FIXED_DELAY 0 207 #define RTWS_DELAY 1 208 #define RTWS_REPLACE 2 209 #define RTWS_DEF_FREE 3 210 #define RTWS_EXP_SYNC 4 211 #define RTWS_COND_GET 5 212 #define RTWS_COND_SYNC 6 213 #define RTWS_POLL_GET 7 214 #define RTWS_POLL_WAIT 8 215 #define RTWS_SYNC 9 216 #define RTWS_STUTTER 10 217 #define RTWS_STOPPING 11 218 static const char * const rcu_torture_writer_state_names[] = { 219 "RTWS_FIXED_DELAY", 220 "RTWS_DELAY", 221 "RTWS_REPLACE", 222 "RTWS_DEF_FREE", 223 "RTWS_EXP_SYNC", 224 "RTWS_COND_GET", 225 "RTWS_COND_SYNC", 226 "RTWS_POLL_GET", 227 "RTWS_POLL_WAIT", 228 "RTWS_SYNC", 229 "RTWS_STUTTER", 230 "RTWS_STOPPING", 231 }; 232 233 /* Record reader segment types and duration for first failing read. */ 234 struct rt_read_seg { 235 int rt_readstate; 236 unsigned long rt_delay_jiffies; 237 unsigned long rt_delay_ms; 238 unsigned long rt_delay_us; 239 bool rt_preempted; 240 }; 241 static int err_segs_recorded; 242 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 243 static int rt_read_nsegs; 244 245 static const char *rcu_torture_writer_state_getname(void) 246 { 247 unsigned int i = READ_ONCE(rcu_torture_writer_state); 248 249 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 250 return "???"; 251 return rcu_torture_writer_state_names[i]; 252 } 253 254 #ifdef CONFIG_RCU_TRACE 255 static u64 notrace rcu_trace_clock_local(void) 256 { 257 u64 ts = trace_clock_local(); 258 259 (void)do_div(ts, NSEC_PER_USEC); 260 return ts; 261 } 262 #else /* #ifdef CONFIG_RCU_TRACE */ 263 static u64 notrace rcu_trace_clock_local(void) 264 { 265 return 0ULL; 266 } 267 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 268 269 /* 270 * Stop aggressive CPU-hog tests a bit before the end of the test in order 271 * to avoid interfering with test shutdown. 272 */ 273 static bool shutdown_time_arrived(void) 274 { 275 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 276 } 277 278 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 279 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 280 /* and boost task create/destroy. */ 281 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 282 static bool barrier_phase; /* Test phase. */ 283 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 284 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 285 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 286 287 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 288 289 /* 290 * Allocate an element from the rcu_tortures pool. 291 */ 292 static struct rcu_torture * 293 rcu_torture_alloc(void) 294 { 295 struct list_head *p; 296 297 spin_lock_bh(&rcu_torture_lock); 298 if (list_empty(&rcu_torture_freelist)) { 299 atomic_inc(&n_rcu_torture_alloc_fail); 300 spin_unlock_bh(&rcu_torture_lock); 301 return NULL; 302 } 303 atomic_inc(&n_rcu_torture_alloc); 304 p = rcu_torture_freelist.next; 305 list_del_init(p); 306 spin_unlock_bh(&rcu_torture_lock); 307 return container_of(p, struct rcu_torture, rtort_free); 308 } 309 310 /* 311 * Free an element to the rcu_tortures pool. 
312 */ 313 static void 314 rcu_torture_free(struct rcu_torture *p) 315 { 316 atomic_inc(&n_rcu_torture_free); 317 spin_lock_bh(&rcu_torture_lock); 318 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 319 spin_unlock_bh(&rcu_torture_lock); 320 } 321 322 /* 323 * Operations vector for selecting different types of tests. 324 */ 325 326 struct rcu_torture_ops { 327 int ttype; 328 void (*init)(void); 329 void (*cleanup)(void); 330 int (*readlock)(void); 331 void (*read_delay)(struct torture_random_state *rrsp, 332 struct rt_read_seg *rtrsp); 333 void (*readunlock)(int idx); 334 int (*readlock_held)(void); 335 unsigned long (*get_gp_seq)(void); 336 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 337 void (*deferred_free)(struct rcu_torture *p); 338 void (*sync)(void); 339 void (*exp_sync)(void); 340 unsigned long (*get_gp_state)(void); 341 unsigned long (*start_gp_poll)(void); 342 bool (*poll_gp_state)(unsigned long oldstate); 343 void (*cond_sync)(unsigned long oldstate); 344 call_rcu_func_t call; 345 void (*cb_barrier)(void); 346 void (*fqs)(void); 347 void (*stats)(void); 348 void (*gp_kthread_dbg)(void); 349 bool (*check_boost_failed)(unsigned long gp_state, int *cpup); 350 int (*stall_dur)(void); 351 long cbflood_max; 352 int irq_capable; 353 int can_boost; 354 int extendables; 355 int slow_gps; 356 int no_pi_lock; 357 const char *name; 358 }; 359 360 static struct rcu_torture_ops *cur_ops; 361 362 /* 363 * Definitions for rcu torture testing. 364 */ 365 366 static int torture_readlock_not_held(void) 367 { 368 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held(); 369 } 370 371 static int rcu_torture_read_lock(void) __acquires(RCU) 372 { 373 rcu_read_lock(); 374 return 0; 375 } 376 377 static void 378 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 379 { 380 unsigned long started; 381 unsigned long completed; 382 const unsigned long shortdelay_us = 200; 383 unsigned long longdelay_ms = 300; 384 unsigned long long ts; 385 386 /* We want a short delay sometimes to make a reader delay the grace 387 * period, and we want a long delay occasionally to trigger 388 * force_quiescent_state. */ 389 390 if (!atomic_read(&rcu_fwd_cb_nodelay) && 391 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 392 started = cur_ops->get_gp_seq(); 393 ts = rcu_trace_clock_local(); 394 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 395 longdelay_ms = 5; /* Avoid triggering BH limits. */ 396 mdelay(longdelay_ms); 397 rtrsp->rt_delay_ms = longdelay_ms; 398 completed = cur_ops->get_gp_seq(); 399 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 400 started, completed); 401 } 402 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 403 udelay(shortdelay_us); 404 rtrsp->rt_delay_us = shortdelay_us; 405 } 406 if (!preempt_count() && 407 !(torture_random(rrsp) % (nrealreaders * 500))) { 408 torture_preempt_schedule(); /* QS only if preemptible. */ 409 rtrsp->rt_preempted = true; 410 } 411 } 412 413 static void rcu_torture_read_unlock(int idx) __releases(RCU) 414 { 415 rcu_read_unlock(); 416 } 417 418 /* 419 * Update callback in the pipe. This should be invoked after a grace period. 420 */ 421 static bool 422 rcu_torture_pipe_update_one(struct rcu_torture *rp) 423 { 424 int i; 425 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 426 427 if (rtrcp) { 428 WRITE_ONCE(rp->rtort_chkp, NULL); 429 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 
430 } 431 i = READ_ONCE(rp->rtort_pipe_count); 432 if (i > RCU_TORTURE_PIPE_LEN) 433 i = RCU_TORTURE_PIPE_LEN; 434 atomic_inc(&rcu_torture_wcount[i]); 435 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 436 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 437 rp->rtort_mbtest = 0; 438 return true; 439 } 440 return false; 441 } 442 443 /* 444 * Update all callbacks in the pipe. Suitable for synchronous grace-period 445 * primitives. 446 */ 447 static void 448 rcu_torture_pipe_update(struct rcu_torture *old_rp) 449 { 450 struct rcu_torture *rp; 451 struct rcu_torture *rp1; 452 453 if (old_rp) 454 list_add(&old_rp->rtort_free, &rcu_torture_removed); 455 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 456 if (rcu_torture_pipe_update_one(rp)) { 457 list_del(&rp->rtort_free); 458 rcu_torture_free(rp); 459 } 460 } 461 } 462 463 static void 464 rcu_torture_cb(struct rcu_head *p) 465 { 466 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 467 468 if (torture_must_stop_irq()) { 469 /* Test is ending, just drop callbacks on the floor. */ 470 /* The next initialization will pick up the pieces. */ 471 return; 472 } 473 if (rcu_torture_pipe_update_one(rp)) 474 rcu_torture_free(rp); 475 else 476 cur_ops->deferred_free(rp); 477 } 478 479 static unsigned long rcu_no_completed(void) 480 { 481 return 0; 482 } 483 484 static void rcu_torture_deferred_free(struct rcu_torture *p) 485 { 486 call_rcu(&p->rtort_rcu, rcu_torture_cb); 487 } 488 489 static void rcu_sync_torture_init(void) 490 { 491 INIT_LIST_HEAD(&rcu_torture_removed); 492 } 493 494 static struct rcu_torture_ops rcu_ops = { 495 .ttype = RCU_FLAVOR, 496 .init = rcu_sync_torture_init, 497 .readlock = rcu_torture_read_lock, 498 .read_delay = rcu_read_delay, 499 .readunlock = rcu_torture_read_unlock, 500 .readlock_held = torture_readlock_not_held, 501 .get_gp_seq = rcu_get_gp_seq, 502 .gp_diff = rcu_seq_diff, 503 .deferred_free = rcu_torture_deferred_free, 504 .sync = synchronize_rcu, 505 .exp_sync = synchronize_rcu_expedited, 506 .get_gp_state = get_state_synchronize_rcu, 507 .start_gp_poll = start_poll_synchronize_rcu, 508 .poll_gp_state = poll_state_synchronize_rcu, 509 .cond_sync = cond_synchronize_rcu, 510 .call = call_rcu, 511 .cb_barrier = rcu_barrier, 512 .fqs = rcu_force_quiescent_state, 513 .stats = NULL, 514 .gp_kthread_dbg = show_rcu_gp_kthreads, 515 .check_boost_failed = rcu_check_boost_fail, 516 .stall_dur = rcu_jiffies_till_stall_check, 517 .irq_capable = 1, 518 .can_boost = IS_ENABLED(CONFIG_RCU_BOOST), 519 .extendables = RCUTORTURE_MAX_EXTEND, 520 .name = "rcu" 521 }; 522 523 /* 524 * Don't even think about trying any of these in real life!!! 525 * The names includes "busted", and they really means it! 526 * The only purpose of these functions is to provide a buggy RCU 527 * implementation to make sure that rcutorture correctly emits 528 * buggy-RCU error messages. 529 */ 530 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 531 { 532 /* This is a deliberate bug for testing purposes only! */ 533 rcu_torture_cb(&p->rtort_rcu); 534 } 535 536 static void synchronize_rcu_busted(void) 537 { 538 /* This is a deliberate bug for testing purposes only! */ 539 } 540 541 static void 542 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 543 { 544 /* This is a deliberate bug for testing purposes only! 
*/ 545 func(head); 546 } 547 548 static struct rcu_torture_ops rcu_busted_ops = { 549 .ttype = INVALID_RCU_FLAVOR, 550 .init = rcu_sync_torture_init, 551 .readlock = rcu_torture_read_lock, 552 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 553 .readunlock = rcu_torture_read_unlock, 554 .readlock_held = torture_readlock_not_held, 555 .get_gp_seq = rcu_no_completed, 556 .deferred_free = rcu_busted_torture_deferred_free, 557 .sync = synchronize_rcu_busted, 558 .exp_sync = synchronize_rcu_busted, 559 .call = call_rcu_busted, 560 .cb_barrier = NULL, 561 .fqs = NULL, 562 .stats = NULL, 563 .irq_capable = 1, 564 .name = "busted" 565 }; 566 567 /* 568 * Definitions for srcu torture testing. 569 */ 570 571 DEFINE_STATIC_SRCU(srcu_ctl); 572 static struct srcu_struct srcu_ctld; 573 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 574 575 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 576 { 577 return srcu_read_lock(srcu_ctlp); 578 } 579 580 static void 581 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 582 { 583 long delay; 584 const long uspertick = 1000000 / HZ; 585 const long longdelay = 10; 586 587 /* We want there to be long-running readers, but not all the time. */ 588 589 delay = torture_random(rrsp) % 590 (nrealreaders * 2 * longdelay * uspertick); 591 if (!delay && in_task()) { 592 schedule_timeout_interruptible(longdelay); 593 rtrsp->rt_delay_jiffies = longdelay; 594 } else { 595 rcu_read_delay(rrsp, rtrsp); 596 } 597 } 598 599 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 600 { 601 srcu_read_unlock(srcu_ctlp, idx); 602 } 603 604 static int torture_srcu_read_lock_held(void) 605 { 606 return srcu_read_lock_held(srcu_ctlp); 607 } 608 609 static unsigned long srcu_torture_completed(void) 610 { 611 return srcu_batches_completed(srcu_ctlp); 612 } 613 614 static void srcu_torture_deferred_free(struct rcu_torture *rp) 615 { 616 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 617 } 618 619 static void srcu_torture_synchronize(void) 620 { 621 synchronize_srcu(srcu_ctlp); 622 } 623 624 static unsigned long srcu_torture_get_gp_state(void) 625 { 626 return get_state_synchronize_srcu(srcu_ctlp); 627 } 628 629 static unsigned long srcu_torture_start_gp_poll(void) 630 { 631 return start_poll_synchronize_srcu(srcu_ctlp); 632 } 633 634 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 635 { 636 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 637 } 638 639 static void srcu_torture_call(struct rcu_head *head, 640 rcu_callback_t func) 641 { 642 call_srcu(srcu_ctlp, head, func); 643 } 644 645 static void srcu_torture_barrier(void) 646 { 647 srcu_barrier(srcu_ctlp); 648 } 649 650 static void srcu_torture_stats(void) 651 { 652 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 653 } 654 655 static void srcu_torture_synchronize_expedited(void) 656 { 657 synchronize_srcu_expedited(srcu_ctlp); 658 } 659 660 static struct rcu_torture_ops srcu_ops = { 661 .ttype = SRCU_FLAVOR, 662 .init = rcu_sync_torture_init, 663 .readlock = srcu_torture_read_lock, 664 .read_delay = srcu_read_delay, 665 .readunlock = srcu_torture_read_unlock, 666 .readlock_held = torture_srcu_read_lock_held, 667 .get_gp_seq = srcu_torture_completed, 668 .deferred_free = srcu_torture_deferred_free, 669 .sync = srcu_torture_synchronize, 670 .exp_sync = srcu_torture_synchronize_expedited, 671 .get_gp_state = srcu_torture_get_gp_state, 672 .start_gp_poll = srcu_torture_start_gp_poll, 673 .poll_gp_state = 
srcu_torture_poll_gp_state, 674 .call = srcu_torture_call, 675 .cb_barrier = srcu_torture_barrier, 676 .stats = srcu_torture_stats, 677 .cbflood_max = 50000, 678 .irq_capable = 1, 679 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 680 .name = "srcu" 681 }; 682 683 static void srcu_torture_init(void) 684 { 685 rcu_sync_torture_init(); 686 WARN_ON(init_srcu_struct(&srcu_ctld)); 687 srcu_ctlp = &srcu_ctld; 688 } 689 690 static void srcu_torture_cleanup(void) 691 { 692 cleanup_srcu_struct(&srcu_ctld); 693 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 694 } 695 696 /* As above, but dynamically allocated. */ 697 static struct rcu_torture_ops srcud_ops = { 698 .ttype = SRCU_FLAVOR, 699 .init = srcu_torture_init, 700 .cleanup = srcu_torture_cleanup, 701 .readlock = srcu_torture_read_lock, 702 .read_delay = srcu_read_delay, 703 .readunlock = srcu_torture_read_unlock, 704 .readlock_held = torture_srcu_read_lock_held, 705 .get_gp_seq = srcu_torture_completed, 706 .deferred_free = srcu_torture_deferred_free, 707 .sync = srcu_torture_synchronize, 708 .exp_sync = srcu_torture_synchronize_expedited, 709 .call = srcu_torture_call, 710 .cb_barrier = srcu_torture_barrier, 711 .stats = srcu_torture_stats, 712 .cbflood_max = 50000, 713 .irq_capable = 1, 714 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 715 .name = "srcud" 716 }; 717 718 /* As above, but broken due to inappropriate reader extension. */ 719 static struct rcu_torture_ops busted_srcud_ops = { 720 .ttype = SRCU_FLAVOR, 721 .init = srcu_torture_init, 722 .cleanup = srcu_torture_cleanup, 723 .readlock = srcu_torture_read_lock, 724 .read_delay = rcu_read_delay, 725 .readunlock = srcu_torture_read_unlock, 726 .readlock_held = torture_srcu_read_lock_held, 727 .get_gp_seq = srcu_torture_completed, 728 .deferred_free = srcu_torture_deferred_free, 729 .sync = srcu_torture_synchronize, 730 .exp_sync = srcu_torture_synchronize_expedited, 731 .call = srcu_torture_call, 732 .cb_barrier = srcu_torture_barrier, 733 .stats = srcu_torture_stats, 734 .irq_capable = 1, 735 .no_pi_lock = IS_ENABLED(CONFIG_TINY_SRCU), 736 .extendables = RCUTORTURE_MAX_EXTEND, 737 .name = "busted_srcud" 738 }; 739 740 /* 741 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 742 * This implementation does not necessarily work well with CPU hotplug. 743 */ 744 745 static void synchronize_rcu_trivial(void) 746 { 747 int cpu; 748 749 for_each_online_cpu(cpu) { 750 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 751 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 752 } 753 } 754 755 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 756 { 757 preempt_disable(); 758 return 0; 759 } 760 761 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 762 { 763 preempt_enable(); 764 } 765 766 static struct rcu_torture_ops trivial_ops = { 767 .ttype = RCU_TRIVIAL_FLAVOR, 768 .init = rcu_sync_torture_init, 769 .readlock = rcu_torture_read_lock_trivial, 770 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 771 .readunlock = rcu_torture_read_unlock_trivial, 772 .readlock_held = torture_readlock_not_held, 773 .get_gp_seq = rcu_no_completed, 774 .sync = synchronize_rcu_trivial, 775 .exp_sync = synchronize_rcu_trivial, 776 .fqs = NULL, 777 .stats = NULL, 778 .irq_capable = 1, 779 .name = "trivial" 780 }; 781 782 #ifdef CONFIG_TASKS_RCU 783 784 /* 785 * Definitions for RCU-tasks torture testing. 
786 */ 787 788 static int tasks_torture_read_lock(void) 789 { 790 return 0; 791 } 792 793 static void tasks_torture_read_unlock(int idx) 794 { 795 } 796 797 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 798 { 799 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 800 } 801 802 static void synchronize_rcu_mult_test(void) 803 { 804 synchronize_rcu_mult(call_rcu_tasks, call_rcu); 805 } 806 807 static struct rcu_torture_ops tasks_ops = { 808 .ttype = RCU_TASKS_FLAVOR, 809 .init = rcu_sync_torture_init, 810 .readlock = tasks_torture_read_lock, 811 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 812 .readunlock = tasks_torture_read_unlock, 813 .get_gp_seq = rcu_no_completed, 814 .deferred_free = rcu_tasks_torture_deferred_free, 815 .sync = synchronize_rcu_tasks, 816 .exp_sync = synchronize_rcu_mult_test, 817 .call = call_rcu_tasks, 818 .cb_barrier = rcu_barrier_tasks, 819 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 820 .fqs = NULL, 821 .stats = NULL, 822 .irq_capable = 1, 823 .slow_gps = 1, 824 .name = "tasks" 825 }; 826 827 #define TASKS_OPS &tasks_ops, 828 829 #else // #ifdef CONFIG_TASKS_RCU 830 831 #define TASKS_OPS 832 833 #endif // #else #ifdef CONFIG_TASKS_RCU 834 835 836 /* 837 * Definitions for rude RCU-tasks torture testing. 838 */ 839 840 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 841 { 842 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 843 } 844 845 static struct rcu_torture_ops tasks_rude_ops = { 846 .ttype = RCU_TASKS_RUDE_FLAVOR, 847 .init = rcu_sync_torture_init, 848 .readlock = rcu_torture_read_lock_trivial, 849 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 850 .readunlock = rcu_torture_read_unlock_trivial, 851 .get_gp_seq = rcu_no_completed, 852 .deferred_free = rcu_tasks_rude_torture_deferred_free, 853 .sync = synchronize_rcu_tasks_rude, 854 .exp_sync = synchronize_rcu_tasks_rude, 855 .call = call_rcu_tasks_rude, 856 .cb_barrier = rcu_barrier_tasks_rude, 857 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 858 .cbflood_max = 50000, 859 .fqs = NULL, 860 .stats = NULL, 861 .irq_capable = 1, 862 .name = "tasks-rude" 863 }; 864 865 #ifdef CONFIG_TASKS_TRACE_RCU 866 867 /* 868 * Definitions for tracing RCU-tasks torture testing. 869 */ 870 871 static int tasks_tracing_torture_read_lock(void) 872 { 873 rcu_read_lock_trace(); 874 return 0; 875 } 876 877 static void tasks_tracing_torture_read_unlock(int idx) 878 { 879 rcu_read_unlock_trace(); 880 } 881 882 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 883 { 884 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 885 } 886 887 static struct rcu_torture_ops tasks_tracing_ops = { 888 .ttype = RCU_TASKS_TRACING_FLAVOR, 889 .init = rcu_sync_torture_init, 890 .readlock = tasks_tracing_torture_read_lock, 891 .read_delay = srcu_read_delay, /* just reuse srcu's version. 
*/ 892 .readunlock = tasks_tracing_torture_read_unlock, 893 .readlock_held = rcu_read_lock_trace_held, 894 .get_gp_seq = rcu_no_completed, 895 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 896 .sync = synchronize_rcu_tasks_trace, 897 .exp_sync = synchronize_rcu_tasks_trace, 898 .call = call_rcu_tasks_trace, 899 .cb_barrier = rcu_barrier_tasks_trace, 900 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 901 .cbflood_max = 50000, 902 .fqs = NULL, 903 .stats = NULL, 904 .irq_capable = 1, 905 .slow_gps = 1, 906 .name = "tasks-tracing" 907 }; 908 909 #define TASKS_TRACING_OPS &tasks_tracing_ops, 910 911 #else // #ifdef CONFIG_TASKS_TRACE_RCU 912 913 #define TASKS_TRACING_OPS 914 915 #endif // #else #ifdef CONFIG_TASKS_TRACE_RCU 916 917 918 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 919 { 920 if (!cur_ops->gp_diff) 921 return new - old; 922 return cur_ops->gp_diff(new, old); 923 } 924 925 /* 926 * RCU torture priority-boost testing. Runs one real-time thread per 927 * CPU for moderate bursts, repeatedly starting grace periods and waiting 928 * for them to complete. If a given grace period takes too long, we assume 929 * that priority inversion has occurred. 930 */ 931 932 static int old_rt_runtime = -1; 933 934 static void rcu_torture_disable_rt_throttle(void) 935 { 936 /* 937 * Disable RT throttling so that rcutorture's boost threads don't get 938 * throttled. Only possible if rcutorture is built-in otherwise the 939 * user should manually do this by setting the sched_rt_period_us and 940 * sched_rt_runtime sysctls. 941 */ 942 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 943 return; 944 945 old_rt_runtime = sysctl_sched_rt_runtime; 946 sysctl_sched_rt_runtime = -1; 947 } 948 949 static void rcu_torture_enable_rt_throttle(void) 950 { 951 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 952 return; 953 954 sysctl_sched_rt_runtime = old_rt_runtime; 955 old_rt_runtime = -1; 956 } 957 958 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start) 959 { 960 int cpu; 961 static int dbg_done; 962 unsigned long end = jiffies; 963 bool gp_done; 964 unsigned long j; 965 static unsigned long last_persist; 966 unsigned long lp; 967 unsigned long mininterval = test_boost_duration * HZ - HZ / 2; 968 969 if (end - *start > mininterval) { 970 // Recheck after checking time to avoid false positives. 971 smp_mb(); // Time check before grace-period check. 972 if (cur_ops->poll_gp_state(gp_state)) 973 return false; // passed, though perhaps just barely 974 if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) { 975 // At most one persisted message per boost test. 976 j = jiffies; 977 lp = READ_ONCE(last_persist); 978 if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp) 979 pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu); 980 return false; // passed on a technicality 981 } 982 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 983 n_rcu_torture_boost_failure++; 984 if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) { 985 pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n", 986 current->rt_priority, gp_state, end - *start); 987 cur_ops->gp_kthread_dbg(); 988 // Recheck after print to flag grace period ending during splat. 989 gp_done = cur_ops->poll_gp_state(gp_state); 990 pr_info("Boost inversion: GP %lu %s.\n", gp_state, 991 gp_done ? 
"ended already" : "still pending"); 992 993 } 994 995 return true; // failed 996 } else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) { 997 *start = jiffies; 998 } 999 1000 return false; // passed 1001 } 1002 1003 static int rcu_torture_boost(void *arg) 1004 { 1005 unsigned long endtime; 1006 unsigned long gp_state; 1007 unsigned long gp_state_time; 1008 unsigned long oldstarttime; 1009 1010 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 1011 1012 /* Set real-time priority. */ 1013 sched_set_fifo_low(current); 1014 1015 /* Each pass through the following loop does one boost-test cycle. */ 1016 do { 1017 bool failed = false; // Test failed already in this test interval 1018 bool gp_initiated = false; 1019 1020 if (kthread_should_stop()) 1021 goto checkwait; 1022 1023 /* Wait for the next test interval. */ 1024 oldstarttime = READ_ONCE(boost_starttime); 1025 while (time_before(jiffies, oldstarttime)) { 1026 schedule_timeout_interruptible(oldstarttime - jiffies); 1027 if (stutter_wait("rcu_torture_boost")) 1028 sched_set_fifo_low(current); 1029 if (torture_must_stop()) 1030 goto checkwait; 1031 } 1032 1033 // Do one boost-test interval. 1034 endtime = oldstarttime + test_boost_duration * HZ; 1035 while (time_before(jiffies, endtime)) { 1036 // Has current GP gone too long? 1037 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1038 failed = rcu_torture_boost_failed(gp_state, &gp_state_time); 1039 // If we don't have a grace period in flight, start one. 1040 if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) { 1041 gp_state = cur_ops->start_gp_poll(); 1042 gp_initiated = true; 1043 gp_state_time = jiffies; 1044 } 1045 if (stutter_wait("rcu_torture_boost")) { 1046 sched_set_fifo_low(current); 1047 // If the grace period already ended, 1048 // we don't know when that happened, so 1049 // start over. 1050 if (cur_ops->poll_gp_state(gp_state)) 1051 gp_initiated = false; 1052 } 1053 if (torture_must_stop()) 1054 goto checkwait; 1055 } 1056 1057 // In case the grace period extended beyond the end of the loop. 1058 if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state)) 1059 rcu_torture_boost_failed(gp_state, &gp_state_time); 1060 1061 /* 1062 * Set the start time of the next test interval. 1063 * Yes, this is vulnerable to long delays, but such 1064 * delays simply cause a false negative for the next 1065 * interval. Besides, we are running at RT priority, 1066 * so delays should be relatively rare. 1067 */ 1068 while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) { 1069 if (mutex_trylock(&boost_mutex)) { 1070 if (oldstarttime == boost_starttime) { 1071 WRITE_ONCE(boost_starttime, 1072 jiffies + test_boost_interval * HZ); 1073 n_rcu_torture_boosts++; 1074 } 1075 mutex_unlock(&boost_mutex); 1076 break; 1077 } 1078 schedule_timeout_uninterruptible(1); 1079 } 1080 1081 /* Go do the stutter. */ 1082 checkwait: if (stutter_wait("rcu_torture_boost")) 1083 sched_set_fifo_low(current); 1084 } while (!torture_must_stop()); 1085 1086 /* Clean up and exit. */ 1087 while (!kthread_should_stop()) { 1088 torture_shutdown_absorb("rcu_torture_boost"); 1089 schedule_timeout_uninterruptible(1); 1090 } 1091 torture_kthread_stopping("rcu_torture_boost"); 1092 return 0; 1093 } 1094 1095 /* 1096 * RCU torture force-quiescent-state kthread. Repeatedly induces 1097 * bursts of calls to force_quiescent_state(), increasing the probability 1098 * of occurrence of some important types of race conditions. 
1099 */ 1100 static int 1101 rcu_torture_fqs(void *arg) 1102 { 1103 unsigned long fqs_resume_time; 1104 int fqs_burst_remaining; 1105 int oldnice = task_nice(current); 1106 1107 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1108 do { 1109 fqs_resume_time = jiffies + fqs_stutter * HZ; 1110 while (time_before(jiffies, fqs_resume_time) && 1111 !kthread_should_stop()) { 1112 schedule_timeout_interruptible(1); 1113 } 1114 fqs_burst_remaining = fqs_duration; 1115 while (fqs_burst_remaining > 0 && 1116 !kthread_should_stop()) { 1117 cur_ops->fqs(); 1118 udelay(fqs_holdoff); 1119 fqs_burst_remaining -= fqs_holdoff; 1120 } 1121 if (stutter_wait("rcu_torture_fqs")) 1122 sched_set_normal(current, oldnice); 1123 } while (!torture_must_stop()); 1124 torture_kthread_stopping("rcu_torture_fqs"); 1125 return 0; 1126 } 1127 1128 // Used by writers to randomly choose from the available grace-period 1129 // primitives. The only purpose of the initialization is to size the array. 1130 static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; 1131 static int nsynctypes; 1132 1133 /* 1134 * Determine which grace-period primitives are available. 1135 */ 1136 static void rcu_torture_write_types(void) 1137 { 1138 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; 1139 bool gp_poll1 = gp_poll, gp_sync1 = gp_sync; 1140 1141 /* Initialize synctype[] array. If none set, take default. */ 1142 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1) 1143 gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true; 1144 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1145 synctype[nsynctypes++] = RTWS_COND_GET; 1146 pr_info("%s: Testing conditional GPs.\n", __func__); 1147 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1148 pr_alert("%s: gp_cond without primitives.\n", __func__); 1149 } 1150 if (gp_exp1 && cur_ops->exp_sync) { 1151 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1152 pr_info("%s: Testing expedited GPs.\n", __func__); 1153 } else if (gp_exp && !cur_ops->exp_sync) { 1154 pr_alert("%s: gp_exp without primitives.\n", __func__); 1155 } 1156 if (gp_normal1 && cur_ops->deferred_free) { 1157 synctype[nsynctypes++] = RTWS_DEF_FREE; 1158 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1159 } else if (gp_normal && !cur_ops->deferred_free) { 1160 pr_alert("%s: gp_normal without primitives.\n", __func__); 1161 } 1162 if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1163 synctype[nsynctypes++] = RTWS_POLL_GET; 1164 pr_info("%s: Testing polling GPs.\n", __func__); 1165 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1166 pr_alert("%s: gp_poll without primitives.\n", __func__); 1167 } 1168 if (gp_sync1 && cur_ops->sync) { 1169 synctype[nsynctypes++] = RTWS_SYNC; 1170 pr_info("%s: Testing normal GPs.\n", __func__); 1171 } else if (gp_sync && !cur_ops->sync) { 1172 pr_alert("%s: gp_sync without primitives.\n", __func__); 1173 } 1174 } 1175 1176 /* 1177 * RCU torture writer kthread. Repeatedly substitutes a new structure 1178 * for that pointed to by rcu_torture_current, freeing the old structure 1179 * after a series of grace periods (the "pipeline"). 
1180 */ 1181 static int 1182 rcu_torture_writer(void *arg) 1183 { 1184 bool boot_ended; 1185 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1186 unsigned long cookie; 1187 int expediting = 0; 1188 unsigned long gp_snap; 1189 int i; 1190 int idx; 1191 int oldnice = task_nice(current); 1192 struct rcu_torture *rp; 1193 struct rcu_torture *old_rp; 1194 static DEFINE_TORTURE_RANDOM(rand); 1195 bool stutter_waited; 1196 1197 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1198 if (!can_expedite) 1199 pr_alert("%s" TORTURE_FLAG 1200 " GP expediting controlled from boot/sysfs for %s.\n", 1201 torture_type, cur_ops->name); 1202 if (WARN_ONCE(nsynctypes == 0, 1203 "rcu_torture_writer: No update-side primitives.\n")) { 1204 /* 1205 * No updates primitives, so don't try updating. 1206 * The resulting test won't be testing much, hence the 1207 * above WARN_ONCE(). 1208 */ 1209 rcu_torture_writer_state = RTWS_STOPPING; 1210 torture_kthread_stopping("rcu_torture_writer"); 1211 } 1212 1213 do { 1214 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1215 torture_hrtimeout_us(500, 1000, &rand); 1216 rp = rcu_torture_alloc(); 1217 if (rp == NULL) 1218 continue; 1219 rp->rtort_pipe_count = 0; 1220 rcu_torture_writer_state = RTWS_DELAY; 1221 udelay(torture_random(&rand) & 0x3ff); 1222 rcu_torture_writer_state = RTWS_REPLACE; 1223 old_rp = rcu_dereference_check(rcu_torture_current, 1224 current == writer_task); 1225 rp->rtort_mbtest = 1; 1226 rcu_assign_pointer(rcu_torture_current, rp); 1227 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1228 if (old_rp) { 1229 i = old_rp->rtort_pipe_count; 1230 if (i > RCU_TORTURE_PIPE_LEN) 1231 i = RCU_TORTURE_PIPE_LEN; 1232 atomic_inc(&rcu_torture_wcount[i]); 1233 WRITE_ONCE(old_rp->rtort_pipe_count, 1234 old_rp->rtort_pipe_count + 1); 1235 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1236 idx = cur_ops->readlock(); 1237 cookie = cur_ops->get_gp_state(); 1238 WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && 1239 cur_ops->poll_gp_state(cookie), 1240 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1241 __func__, 1242 rcu_torture_writer_state_getname(), 1243 rcu_torture_writer_state, 1244 cookie, cur_ops->get_gp_state()); 1245 cur_ops->readunlock(idx); 1246 } 1247 switch (synctype[torture_random(&rand) % nsynctypes]) { 1248 case RTWS_DEF_FREE: 1249 rcu_torture_writer_state = RTWS_DEF_FREE; 1250 cur_ops->deferred_free(old_rp); 1251 break; 1252 case RTWS_EXP_SYNC: 1253 rcu_torture_writer_state = RTWS_EXP_SYNC; 1254 cur_ops->exp_sync(); 1255 rcu_torture_pipe_update(old_rp); 1256 break; 1257 case RTWS_COND_GET: 1258 rcu_torture_writer_state = RTWS_COND_GET; 1259 gp_snap = cur_ops->get_gp_state(); 1260 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1261 rcu_torture_writer_state = RTWS_COND_SYNC; 1262 cur_ops->cond_sync(gp_snap); 1263 rcu_torture_pipe_update(old_rp); 1264 break; 1265 case RTWS_POLL_GET: 1266 rcu_torture_writer_state = RTWS_POLL_GET; 1267 gp_snap = cur_ops->start_gp_poll(); 1268 rcu_torture_writer_state = RTWS_POLL_WAIT; 1269 while (!cur_ops->poll_gp_state(gp_snap)) 1270 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1271 &rand); 1272 rcu_torture_pipe_update(old_rp); 1273 break; 1274 case RTWS_SYNC: 1275 rcu_torture_writer_state = RTWS_SYNC; 1276 cur_ops->sync(); 1277 rcu_torture_pipe_update(old_rp); 1278 break; 1279 default: 1280 WARN_ON_ONCE(1); 1281 break; 1282 } 1283 } 1284 WRITE_ONCE(rcu_torture_current_version, 1285 rcu_torture_current_version + 1); 1286 /* Cycle through nesting 
levels of rcu_expedite_gp() calls. */ 1287 if (can_expedite && 1288 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1289 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1290 if (expediting >= 0) 1291 rcu_expedite_gp(); 1292 else 1293 rcu_unexpedite_gp(); 1294 if (++expediting > 3) 1295 expediting = -expediting; 1296 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1297 can_expedite = !rcu_gp_is_expedited() && 1298 !rcu_gp_is_normal(); 1299 } 1300 rcu_torture_writer_state = RTWS_STUTTER; 1301 boot_ended = rcu_inkernel_boot_has_ended(); 1302 stutter_waited = stutter_wait("rcu_torture_writer"); 1303 if (stutter_waited && 1304 !atomic_read(&rcu_fwd_cb_nodelay) && 1305 !cur_ops->slow_gps && 1306 !torture_must_stop() && 1307 boot_ended) 1308 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1309 if (list_empty(&rcu_tortures[i].rtort_free) && 1310 rcu_access_pointer(rcu_torture_current) != 1311 &rcu_tortures[i]) { 1312 rcu_ftrace_dump(DUMP_ALL); 1313 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1314 } 1315 if (stutter_waited) 1316 sched_set_normal(current, oldnice); 1317 } while (!torture_must_stop()); 1318 rcu_torture_current = NULL; // Let stats task know that we are done. 1319 /* Reset expediting back to unexpedited. */ 1320 if (expediting > 0) 1321 expediting = -expediting; 1322 while (can_expedite && expediting++ < 0) 1323 rcu_unexpedite_gp(); 1324 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1325 if (!can_expedite) 1326 pr_alert("%s" TORTURE_FLAG 1327 " Dynamic grace-period expediting was disabled.\n", 1328 torture_type); 1329 rcu_torture_writer_state = RTWS_STOPPING; 1330 torture_kthread_stopping("rcu_torture_writer"); 1331 return 0; 1332 } 1333 1334 /* 1335 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1336 * delay between calls. 
1337 */ 1338 static int 1339 rcu_torture_fakewriter(void *arg) 1340 { 1341 unsigned long gp_snap; 1342 DEFINE_TORTURE_RANDOM(rand); 1343 1344 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1345 set_user_nice(current, MAX_NICE); 1346 1347 do { 1348 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1349 if (cur_ops->cb_barrier != NULL && 1350 torture_random(&rand) % (nfakewriters * 8) == 0) { 1351 cur_ops->cb_barrier(); 1352 } else { 1353 switch (synctype[torture_random(&rand) % nsynctypes]) { 1354 case RTWS_DEF_FREE: 1355 break; 1356 case RTWS_EXP_SYNC: 1357 cur_ops->exp_sync(); 1358 break; 1359 case RTWS_COND_GET: 1360 gp_snap = cur_ops->get_gp_state(); 1361 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1362 cur_ops->cond_sync(gp_snap); 1363 break; 1364 case RTWS_POLL_GET: 1365 gp_snap = cur_ops->start_gp_poll(); 1366 while (!cur_ops->poll_gp_state(gp_snap)) { 1367 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1368 &rand); 1369 } 1370 break; 1371 case RTWS_SYNC: 1372 cur_ops->sync(); 1373 break; 1374 default: 1375 WARN_ON_ONCE(1); 1376 break; 1377 } 1378 } 1379 stutter_wait("rcu_torture_fakewriter"); 1380 } while (!torture_must_stop()); 1381 1382 torture_kthread_stopping("rcu_torture_fakewriter"); 1383 return 0; 1384 } 1385 1386 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1387 { 1388 kfree(rhp); 1389 } 1390 1391 // Set up and carry out testing of RCU's global memory ordering 1392 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1393 struct torture_random_state *trsp) 1394 { 1395 unsigned long loops; 1396 int noc = torture_num_online_cpus(); 1397 int rdrchked; 1398 int rdrchker; 1399 struct rcu_torture_reader_check *rtrcp; // Me. 1400 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1401 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1402 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1403 1404 if (myid < 0) 1405 return; // Don't try this from timer handlers. 1406 1407 // Increment my counter. 1408 rtrcp = &rcu_torture_reader_mbchk[myid]; 1409 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1410 1411 // Attempt to assign someone else some checking work. 1412 rdrchked = torture_random(trsp) % nrealreaders; 1413 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1414 rdrchker = torture_random(trsp) % nrealreaders; 1415 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1416 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1417 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1418 !READ_ONCE(rtp->rtort_chkp) && 1419 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1420 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1421 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1422 rtrcp->rtc_chkrdr = rdrchked; 1423 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1424 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1425 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1426 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1427 } 1428 1429 // If assigned some completed work, do it! 1430 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1431 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1432 return; // No work or work not yet ready. 
1433 rdrchked = rtrcp_assigner->rtc_chkrdr; 1434 if (WARN_ON_ONCE(rdrchked < 0)) 1435 return; 1436 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1437 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1438 atomic_inc(&n_rcu_torture_mbchk_tries); 1439 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1440 atomic_inc(&n_rcu_torture_mbchk_fail); 1441 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1442 rtrcp_assigner->rtc_ready = 0; 1443 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1444 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1445 } 1446 1447 /* 1448 * Do one extension of an RCU read-side critical section using the 1449 * current reader state in readstate (set to zero for initial entry 1450 * to extended critical section), set the new state as specified by 1451 * newstate (set to zero for final exit from extended critical section), 1452 * and random-number-generator state in trsp. If this is neither the 1453 * beginning or end of the critical section and if there was actually a 1454 * change, do a ->read_delay(). 1455 */ 1456 static void rcutorture_one_extend(int *readstate, int newstate, 1457 struct torture_random_state *trsp, 1458 struct rt_read_seg *rtrsp) 1459 { 1460 unsigned long flags; 1461 int idxnew1 = -1; 1462 int idxnew2 = -1; 1463 int idxold1 = *readstate; 1464 int idxold2 = idxold1; 1465 int statesnew = ~*readstate & newstate; 1466 int statesold = *readstate & ~newstate; 1467 1468 WARN_ON_ONCE(idxold2 < 0); 1469 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1470 rtrsp->rt_readstate = newstate; 1471 1472 /* First, put new protection in place to avoid critical-section gap. */ 1473 if (statesnew & RCUTORTURE_RDR_BH) 1474 local_bh_disable(); 1475 if (statesnew & RCUTORTURE_RDR_RBH) 1476 rcu_read_lock_bh(); 1477 if (statesnew & RCUTORTURE_RDR_IRQ) 1478 local_irq_disable(); 1479 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1480 preempt_disable(); 1481 if (statesnew & RCUTORTURE_RDR_SCHED) 1482 rcu_read_lock_sched(); 1483 if (statesnew & RCUTORTURE_RDR_RCU_1) 1484 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1; 1485 if (statesnew & RCUTORTURE_RDR_RCU_2) 1486 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2; 1487 1488 /* 1489 * Next, remove old protection, in decreasing order of strength 1490 * to avoid unlock paths that aren't safe in the stronger 1491 * context. Namely: BH can not be enabled with disabled interrupts. 1492 * Additionally PREEMPT_RT requires that BH is enabled in preemptible 1493 * context. 
1494 */ 1495 if (statesold & RCUTORTURE_RDR_IRQ) 1496 local_irq_enable(); 1497 if (statesold & RCUTORTURE_RDR_PREEMPT) 1498 preempt_enable(); 1499 if (statesold & RCUTORTURE_RDR_SCHED) 1500 rcu_read_unlock_sched(); 1501 if (statesold & RCUTORTURE_RDR_BH) 1502 local_bh_enable(); 1503 if (statesold & RCUTORTURE_RDR_RBH) 1504 rcu_read_unlock_bh(); 1505 if (statesold & RCUTORTURE_RDR_RCU_2) { 1506 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1); 1507 WARN_ON_ONCE(idxnew2 != -1); 1508 idxold2 = 0; 1509 } 1510 if (statesold & RCUTORTURE_RDR_RCU_1) { 1511 bool lockit; 1512 1513 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff); 1514 if (lockit) 1515 raw_spin_lock_irqsave(¤t->pi_lock, flags); 1516 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1); 1517 WARN_ON_ONCE(idxnew1 != -1); 1518 idxold1 = 0; 1519 if (lockit) 1520 raw_spin_unlock_irqrestore(¤t->pi_lock, flags); 1521 } 1522 1523 /* Delay if neither beginning nor end and there was a change. */ 1524 if ((statesnew || statesold) && *readstate && newstate) 1525 cur_ops->read_delay(trsp, rtrsp); 1526 1527 /* Update the reader state. */ 1528 if (idxnew1 == -1) 1529 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1; 1530 WARN_ON_ONCE(idxnew1 < 0); 1531 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1)) 1532 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1); 1533 if (idxnew2 == -1) 1534 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2; 1535 WARN_ON_ONCE(idxnew2 < 0); 1536 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1); 1537 *readstate = idxnew1 | idxnew2 | newstate; 1538 WARN_ON_ONCE(*readstate < 0); 1539 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1)) 1540 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2); 1541 } 1542 1543 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1544 static int rcutorture_extend_mask_max(void) 1545 { 1546 int mask; 1547 1548 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1549 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1550 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2; 1551 return mask; 1552 } 1553 1554 /* Return a random protection state mask, but with at least one bit set. */ 1555 static int 1556 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1557 { 1558 int mask = rcutorture_extend_mask_max(); 1559 unsigned long randmask1 = torture_random(trsp) >> 8; 1560 unsigned long randmask2 = randmask1 >> 3; 1561 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED; 1562 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ; 1563 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1564 1565 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1); 1566 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1567 if (!(randmask1 & 0x7)) 1568 mask = mask & randmask2; 1569 else 1570 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1571 1572 // Can't have nested RCU reader without outer RCU reader. 1573 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) { 1574 if (oldmask & RCUTORTURE_RDR_RCU_1) 1575 mask &= ~RCUTORTURE_RDR_RCU_2; 1576 else 1577 mask |= RCUTORTURE_RDR_RCU_1; 1578 } 1579 1580 /* 1581 * Can't enable bh w/irq disabled. 1582 */ 1583 if (mask & RCUTORTURE_RDR_IRQ) 1584 mask |= oldmask & bhs; 1585 1586 /* 1587 * Ideally these sequences would be detected in debug builds 1588 * (regardless of RT), but until then don't stop testing 1589 * them on non-RT. 
1590 */ 1591 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1592 /* Can't modify BH in atomic context */ 1593 if (oldmask & preempts_irq) 1594 mask &= ~bhs; 1595 if ((oldmask | mask) & preempts_irq) 1596 mask |= oldmask & bhs; 1597 } 1598 1599 return mask ?: RCUTORTURE_RDR_RCU_1; 1600 } 1601 1602 /* 1603 * Do a randomly selected number of extensions of an existing RCU read-side 1604 * critical section. 1605 */ 1606 static struct rt_read_seg * 1607 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1608 struct rt_read_seg *rtrsp) 1609 { 1610 int i; 1611 int j; 1612 int mask = rcutorture_extend_mask_max(); 1613 1614 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1615 if (!((mask - 1) & mask)) 1616 return rtrsp; /* Current RCU reader not extendable. */ 1617 /* Bias towards larger numbers of loops. */ 1618 i = (torture_random(trsp) >> 3); 1619 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1620 for (j = 0; j < i; j++) { 1621 mask = rcutorture_extend_mask(*readstate, trsp); 1622 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1623 } 1624 return &rtrsp[j]; 1625 } 1626 1627 /* 1628 * Do one read-side critical section, returning false if there was 1629 * no data to read. Can be invoked both from process context and 1630 * from a timer handler. 1631 */ 1632 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1633 { 1634 unsigned long cookie; 1635 int i; 1636 unsigned long started; 1637 unsigned long completed; 1638 int newstate; 1639 struct rcu_torture *p; 1640 int pipe_count; 1641 int readstate = 0; 1642 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1643 struct rt_read_seg *rtrsp = &rtseg[0]; 1644 struct rt_read_seg *rtrsp1; 1645 unsigned long long ts; 1646 1647 WARN_ON_ONCE(!rcu_is_watching()); 1648 newstate = rcutorture_extend_mask(readstate, trsp); 1649 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1650 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1651 cookie = cur_ops->get_gp_state(); 1652 started = cur_ops->get_gp_seq(); 1653 ts = rcu_trace_clock_local(); 1654 p = rcu_dereference_check(rcu_torture_current, 1655 !cur_ops->readlock_held || cur_ops->readlock_held()); 1656 if (p == NULL) { 1657 /* Wait for rcu_torture_writer to get underway */ 1658 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1659 return false; 1660 } 1661 if (p->rtort_mbtest == 0) 1662 atomic_inc(&n_rcu_torture_mberror); 1663 rcu_torture_reader_do_mbchk(myid, p, trsp); 1664 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1665 preempt_disable(); 1666 pipe_count = READ_ONCE(p->rtort_pipe_count); 1667 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1668 /* Should not happen, but... */ 1669 pipe_count = RCU_TORTURE_PIPE_LEN; 1670 } 1671 completed = cur_ops->get_gp_seq(); 1672 if (pipe_count > 1) { 1673 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1674 ts, started, completed); 1675 rcu_ftrace_dump(DUMP_ALL); 1676 } 1677 __this_cpu_inc(rcu_torture_count[pipe_count]); 1678 completed = rcutorture_seq_diff(completed, started); 1679 if (completed > RCU_TORTURE_PIPE_LEN) { 1680 /* Should not happen, but... 
*/ 1681 completed = RCU_TORTURE_PIPE_LEN; 1682 } 1683 __this_cpu_inc(rcu_torture_batch[completed]); 1684 preempt_enable(); 1685 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1686 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1687 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 1688 __func__, 1689 rcu_torture_writer_state_getname(), 1690 rcu_torture_writer_state, 1691 cookie, cur_ops->get_gp_state()); 1692 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1693 WARN_ON_ONCE(readstate); 1694 // This next splat is expected behavior if leakpointer, especially 1695 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 1696 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 1697 1698 /* If error or close call, record the sequence of reader protections. */ 1699 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1700 i = 0; 1701 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1702 err_segs[i++] = *rtrsp1; 1703 rt_read_nsegs = i; 1704 } 1705 1706 return true; 1707 } 1708 1709 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 1710 1711 /* 1712 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1713 * incrementing the corresponding element of the pipeline array. The 1714 * counter in the element should never be greater than 1, otherwise, the 1715 * RCU implementation is broken. 1716 */ 1717 static void rcu_torture_timer(struct timer_list *unused) 1718 { 1719 atomic_long_inc(&n_rcu_torture_timers); 1720 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 1721 1722 /* Test call_rcu() invocation from interrupt handler. */ 1723 if (cur_ops->call) { 1724 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 1725 1726 if (rhp) 1727 cur_ops->call(rhp, rcu_torture_timer_cb); 1728 } 1729 } 1730 1731 /* 1732 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 1733 * incrementing the corresponding element of the pipeline array. The 1734 * counter in the element should never be greater than 1, otherwise, the 1735 * RCU implementation is broken. 1736 */ 1737 static int 1738 rcu_torture_reader(void *arg) 1739 { 1740 unsigned long lastsleep = jiffies; 1741 long myid = (long)arg; 1742 int mynumonline = myid; 1743 DEFINE_TORTURE_RANDOM(rand); 1744 struct timer_list t; 1745 1746 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1747 set_user_nice(current, MAX_NICE); 1748 if (irqreader && cur_ops->irq_capable) 1749 timer_setup_on_stack(&t, rcu_torture_timer, 0); 1750 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1751 do { 1752 if (irqreader && cur_ops->irq_capable) { 1753 if (!timer_pending(&t)) 1754 mod_timer(&t, jiffies + 1); 1755 } 1756 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 1757 schedule_timeout_interruptible(HZ); 1758 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 1759 torture_hrtimeout_us(500, 1000, &rand); 1760 lastsleep = jiffies + 10; 1761 } 1762 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 1763 schedule_timeout_interruptible(HZ / 5); 1764 stutter_wait("rcu_torture_reader"); 1765 } while (!torture_must_stop()); 1766 if (irqreader && cur_ops->irq_capable) { 1767 del_timer_sync(&t); 1768 destroy_timer_on_stack(&t); 1769 } 1770 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1771 torture_kthread_stopping("rcu_torture_reader"); 1772 return 0; 1773 } 1774 1775 /* 1776 * Randomly Toggle CPUs' callback-offload state. This uses hrtimers to 1777 * increase race probabilities and fuzzes the interval between toggling. 
1778 */ 1779 static int rcu_nocb_toggle(void *arg) 1780 { 1781 int cpu; 1782 int maxcpu = -1; 1783 int oldnice = task_nice(current); 1784 long r; 1785 DEFINE_TORTURE_RANDOM(rand); 1786 ktime_t toggle_delay; 1787 unsigned long toggle_fuzz; 1788 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle); 1789 1790 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started"); 1791 while (!rcu_inkernel_boot_has_ended()) 1792 schedule_timeout_interruptible(HZ / 10); 1793 for_each_online_cpu(cpu) 1794 maxcpu = cpu; 1795 WARN_ON(maxcpu < 0); 1796 if (toggle_interval > ULONG_MAX) 1797 toggle_fuzz = ULONG_MAX >> 3; 1798 else 1799 toggle_fuzz = toggle_interval >> 3; 1800 if (toggle_fuzz <= 0) 1801 toggle_fuzz = NSEC_PER_USEC; 1802 do { 1803 r = torture_random(&rand); 1804 cpu = (r >> 4) % (maxcpu + 1); 1805 if (r & 0x1) { 1806 rcu_nocb_cpu_offload(cpu); 1807 atomic_long_inc(&n_nocb_offload); 1808 } else { 1809 rcu_nocb_cpu_deoffload(cpu); 1810 atomic_long_inc(&n_nocb_deoffload); 1811 } 1812 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval; 1813 set_current_state(TASK_INTERRUPTIBLE); 1814 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL); 1815 if (stutter_wait("rcu_nocb_toggle")) 1816 sched_set_normal(current, oldnice); 1817 } while (!torture_must_stop()); 1818 torture_kthread_stopping("rcu_nocb_toggle"); 1819 return 0; 1820 } 1821 1822 /* 1823 * Print torture statistics. Caller must ensure that there is only 1824 * one call to this function at a given time!!! This is normally 1825 * accomplished by relying on the module system to only have one copy 1826 * of the module loaded, and then by giving the rcu_torture_stats 1827 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1828 * thread is not running). 1829 */ 1830 static void 1831 rcu_torture_stats_print(void) 1832 { 1833 int cpu; 1834 int i; 1835 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1836 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1837 struct rcu_torture *rtcp; 1838 static unsigned long rtcv_snap = ULONG_MAX; 1839 static bool splatted; 1840 struct task_struct *wtp; 1841 1842 for_each_possible_cpu(cpu) { 1843 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1844 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 1845 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 1846 } 1847 } 1848 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1849 if (pipesummary[i] != 0) 1850 break; 1851 } 1852 1853 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1854 rtcp = rcu_access_pointer(rcu_torture_current); 1855 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1856 rtcp, 1857 rtcp && !rcu_stall_is_suppressed_at_boot() ? 
"ver" : "VER", 1858 rcu_torture_current_version, 1859 list_empty(&rcu_torture_freelist), 1860 atomic_read(&n_rcu_torture_alloc), 1861 atomic_read(&n_rcu_torture_alloc_fail), 1862 atomic_read(&n_rcu_torture_free)); 1863 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", 1864 atomic_read(&n_rcu_torture_mberror), 1865 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 1866 n_rcu_torture_barrier_error, 1867 n_rcu_torture_boost_ktrerror, 1868 n_rcu_torture_boost_rterror); 1869 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1870 n_rcu_torture_boost_failure, 1871 n_rcu_torture_boosts, 1872 atomic_long_read(&n_rcu_torture_timers)); 1873 torture_onoff_stats(); 1874 pr_cont("barrier: %ld/%ld:%ld ", 1875 data_race(n_barrier_successes), 1876 data_race(n_barrier_attempts), 1877 data_race(n_rcu_torture_barrier_error)); 1878 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 1879 pr_cont("nocb-toggles: %ld:%ld\n", 1880 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 1881 1882 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1883 if (atomic_read(&n_rcu_torture_mberror) || 1884 atomic_read(&n_rcu_torture_mbchk_fail) || 1885 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1886 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1887 i > 1) { 1888 pr_cont("%s", "!!! "); 1889 atomic_inc(&n_rcu_torture_error); 1890 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1891 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 1892 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1893 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1894 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1895 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 1896 WARN_ON_ONCE(i > 1); // Too-short grace period 1897 } 1898 pr_cont("Reader Pipe: "); 1899 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1900 pr_cont(" %ld", pipesummary[i]); 1901 pr_cont("\n"); 1902 1903 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1904 pr_cont("Reader Batch: "); 1905 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1906 pr_cont(" %ld", batchsummary[i]); 1907 pr_cont("\n"); 1908 1909 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1910 pr_cont("Free-Block Circulation: "); 1911 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1912 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1913 } 1914 pr_cont("\n"); 1915 1916 if (cur_ops->stats) 1917 cur_ops->stats(); 1918 if (rtcv_snap == rcu_torture_current_version && 1919 rcu_access_pointer(rcu_torture_current) && 1920 !rcu_stall_is_suppressed()) { 1921 int __maybe_unused flags = 0; 1922 unsigned long __maybe_unused gp_seq = 0; 1923 1924 rcutorture_get_gp_data(cur_ops->ttype, 1925 &flags, &gp_seq); 1926 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1927 &flags, &gp_seq); 1928 wtp = READ_ONCE(writer_task); 1929 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n", 1930 rcu_torture_writer_state_getname(), 1931 rcu_torture_writer_state, gp_seq, flags, 1932 wtp == NULL ? ~0U : wtp->__state, 1933 wtp == NULL ? 
-1 : (int)task_cpu(wtp)); 1934 if (!splatted && wtp) { 1935 sched_show_task(wtp); 1936 splatted = true; 1937 } 1938 if (cur_ops->gp_kthread_dbg) 1939 cur_ops->gp_kthread_dbg(); 1940 rcu_ftrace_dump(DUMP_ALL); 1941 } 1942 rtcv_snap = rcu_torture_current_version; 1943 } 1944 1945 /* 1946 * Periodically prints torture statistics, if periodic statistics printing 1947 * was specified via the stat_interval module parameter. 1948 */ 1949 static int 1950 rcu_torture_stats(void *arg) 1951 { 1952 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1953 do { 1954 schedule_timeout_interruptible(stat_interval * HZ); 1955 rcu_torture_stats_print(); 1956 torture_shutdown_absorb("rcu_torture_stats"); 1957 } while (!torture_must_stop()); 1958 torture_kthread_stopping("rcu_torture_stats"); 1959 return 0; 1960 } 1961 1962 /* Test mem_dump_obj() and friends. */ 1963 static void rcu_torture_mem_dump_obj(void) 1964 { 1965 struct rcu_head *rhp; 1966 struct kmem_cache *kcp; 1967 static int z; 1968 1969 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 1970 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 1971 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 1972 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 1973 mem_dump_obj(ZERO_SIZE_PTR); 1974 pr_alert("mem_dump_obj(NULL):"); 1975 mem_dump_obj(NULL); 1976 pr_alert("mem_dump_obj(%px):", &rhp); 1977 mem_dump_obj(&rhp); 1978 pr_alert("mem_dump_obj(%px):", rhp); 1979 mem_dump_obj(rhp); 1980 pr_alert("mem_dump_obj(%px):", &rhp->func); 1981 mem_dump_obj(&rhp->func); 1982 pr_alert("mem_dump_obj(%px):", &z); 1983 mem_dump_obj(&z); 1984 kmem_cache_free(kcp, rhp); 1985 kmem_cache_destroy(kcp); 1986 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 1987 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 1988 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 1989 mem_dump_obj(rhp); 1990 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 1991 mem_dump_obj(&rhp->func); 1992 kfree(rhp); 1993 rhp = vmalloc(4096); 1994 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 1995 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 1996 mem_dump_obj(rhp); 1997 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 1998 mem_dump_obj(&rhp->func); 1999 vfree(rhp); 2000 } 2001 2002 static void 2003 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 2004 { 2005 pr_alert("%s" TORTURE_FLAG 2006 "--- %s: nreaders=%d nfakewriters=%d " 2007 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 2008 "shuffle_interval=%d stutter=%d irqreader=%d " 2009 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 2010 "test_boost=%d/%d test_boost_interval=%d " 2011 "test_boost_duration=%d shutdown_secs=%d " 2012 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 2013 "stall_cpu_block=%d " 2014 "n_barrier_cbs=%d " 2015 "onoff_interval=%d onoff_holdoff=%d " 2016 "read_exit_delay=%d read_exit_burst=%d " 2017 "nocbs_nthreads=%d nocbs_toggle=%d\n", 2018 torture_type, tag, nrealreaders, nfakewriters, 2019 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 2020 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 2021 test_boost, cur_ops->can_boost, 2022 test_boost_interval, test_boost_duration, shutdown_secs, 2023 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 2024 stall_cpu_block, 2025 n_barrier_cbs, 2026 onoff_interval, onoff_holdoff, 2027 read_exit_delay, read_exit_burst, 2028 
nocbs_nthreads, nocbs_toggle); 2029 } 2030 2031 static int rcutorture_booster_cleanup(unsigned int cpu) 2032 { 2033 struct task_struct *t; 2034 2035 if (boost_tasks[cpu] == NULL) 2036 return 0; 2037 mutex_lock(&boost_mutex); 2038 t = boost_tasks[cpu]; 2039 boost_tasks[cpu] = NULL; 2040 rcu_torture_enable_rt_throttle(); 2041 mutex_unlock(&boost_mutex); 2042 2043 /* This must be outside of the mutex, otherwise deadlock! */ 2044 torture_stop_kthread(rcu_torture_boost, t); 2045 return 0; 2046 } 2047 2048 static int rcutorture_booster_init(unsigned int cpu) 2049 { 2050 int retval; 2051 2052 if (boost_tasks[cpu] != NULL) 2053 return 0; /* Already created, nothing more to do. */ 2054 2055 /* Don't allow time recalculation while creating a new task. */ 2056 mutex_lock(&boost_mutex); 2057 rcu_torture_disable_rt_throttle(); 2058 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 2059 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL, 2060 cpu, "rcu_torture_boost_%u"); 2061 if (IS_ERR(boost_tasks[cpu])) { 2062 retval = PTR_ERR(boost_tasks[cpu]); 2063 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 2064 n_rcu_torture_boost_ktrerror++; 2065 boost_tasks[cpu] = NULL; 2066 mutex_unlock(&boost_mutex); 2067 return retval; 2068 } 2069 mutex_unlock(&boost_mutex); 2070 return 0; 2071 } 2072 2073 /* 2074 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 2075 * induces a CPU stall for the time specified by stall_cpu. 2076 */ 2077 static int rcu_torture_stall(void *args) 2078 { 2079 int idx; 2080 unsigned long stop_at; 2081 2082 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 2083 if (stall_cpu_holdoff > 0) { 2084 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 2085 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 2086 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 2087 } 2088 if (!kthread_should_stop() && stall_gp_kthread > 0) { 2089 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 2090 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 2091 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 2092 if (kthread_should_stop()) 2093 break; 2094 schedule_timeout_uninterruptible(HZ); 2095 } 2096 } 2097 if (!kthread_should_stop() && stall_cpu > 0) { 2098 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2099 stop_at = ktime_get_seconds() + stall_cpu; 2100 /* RCU CPU stall is expected behavior in following code. */ 2101 idx = cur_ops->readlock(); 2102 if (stall_cpu_irqsoff) 2103 local_irq_disable(); 2104 else if (!stall_cpu_block) 2105 preempt_disable(); 2106 pr_alert("%s start on CPU %d.\n", 2107 __func__, raw_smp_processor_id()); 2108 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2109 stop_at)) 2110 if (stall_cpu_block) { 2111 #ifdef CONFIG_PREEMPTION 2112 preempt_schedule(); 2113 #else 2114 schedule_timeout_uninterruptible(HZ); 2115 #endif 2116 } else if (stall_no_softlockup) { 2117 touch_softlockup_watchdog(); 2118 } 2119 if (stall_cpu_irqsoff) 2120 local_irq_enable(); 2121 else if (!stall_cpu_block) 2122 preempt_enable(); 2123 cur_ops->readunlock(idx); 2124 } 2125 pr_alert("%s end.\n", __func__); 2126 torture_shutdown_absorb("rcu_torture_stall"); 2127 while (!kthread_should_stop()) 2128 schedule_timeout_interruptible(10 * HZ); 2129 return 0; 2130 } 2131 2132 /* Spawn CPU-stall kthread, if stall_cpu specified. 
*/ 2133 static int __init rcu_torture_stall_init(void) 2134 { 2135 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2136 return 0; 2137 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2138 } 2139 2140 /* State structure for forward-progress self-propagating RCU callback. */ 2141 struct fwd_cb_state { 2142 struct rcu_head rh; 2143 int stop; 2144 }; 2145 2146 /* 2147 * Forward-progress self-propagating RCU callback function. Because 2148 * callbacks run from softirq, this function is an implicit RCU read-side 2149 * critical section. 2150 */ 2151 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2152 { 2153 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2154 2155 if (READ_ONCE(fcsp->stop)) { 2156 WRITE_ONCE(fcsp->stop, 2); 2157 return; 2158 } 2159 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2160 } 2161 2162 /* State for continuous-flood RCU callbacks. */ 2163 struct rcu_fwd_cb { 2164 struct rcu_head rh; 2165 struct rcu_fwd_cb *rfc_next; 2166 struct rcu_fwd *rfc_rfp; 2167 int rfc_gps; 2168 }; 2169 2170 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2171 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2172 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2173 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 2174 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2175 2176 struct rcu_launder_hist { 2177 long n_launders; 2178 unsigned long launder_gp_seq; 2179 }; 2180 2181 struct rcu_fwd { 2182 spinlock_t rcu_fwd_lock; 2183 struct rcu_fwd_cb *rcu_fwd_cb_head; 2184 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2185 long n_launders_cb; 2186 unsigned long rcu_fwd_startat; 2187 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2188 unsigned long rcu_launder_gp_seq_start; 2189 int rcu_fwd_id; 2190 }; 2191 2192 static DEFINE_MUTEX(rcu_fwd_mutex); 2193 static struct rcu_fwd *rcu_fwds; 2194 static unsigned long rcu_fwd_seq; 2195 static atomic_long_t rcu_fwd_max_cbs; 2196 static bool rcu_fwd_emergency_stop; 2197 2198 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2199 { 2200 unsigned long gps; 2201 unsigned long gps_old; 2202 int i; 2203 int j; 2204 2205 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2206 if (rfp->n_launders_hist[i].n_launders > 0) 2207 break; 2208 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):", 2209 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat); 2210 gps_old = rfp->rcu_launder_gp_seq_start; 2211 for (j = 0; j <= i; j++) { 2212 gps = rfp->n_launders_hist[j].launder_gp_seq; 2213 pr_cont(" %ds/%d: %ld:%ld", 2214 j + 1, FWD_CBS_HIST_DIV, 2215 rfp->n_launders_hist[j].n_launders, 2216 rcutorture_seq_diff(gps, gps_old)); 2217 gps_old = gps; 2218 } 2219 pr_cont("\n"); 2220 } 2221 2222 /* Callback function for continuous-flood RCU callbacks. 
*/ 2223 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2224 { 2225 unsigned long flags; 2226 int i; 2227 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2228 struct rcu_fwd_cb **rfcpp; 2229 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2230 2231 rfcp->rfc_next = NULL; 2232 rfcp->rfc_gps++; 2233 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2234 rfcpp = rfp->rcu_fwd_cb_tail; 2235 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2236 WRITE_ONCE(*rfcpp, rfcp); 2237 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2238 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2239 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2240 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2241 rfp->n_launders_hist[i].n_launders++; 2242 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2243 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2244 } 2245 2246 // Give the scheduler a chance, even on nohz_full CPUs. 2247 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2248 { 2249 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2250 // Real call_rcu() floods hit userspace, so emulate that. 2251 if (need_resched() || (iter & 0xfff)) 2252 schedule(); 2253 return; 2254 } 2255 // No userspace emulation: CB invocation throttles call_rcu() 2256 cond_resched(); 2257 } 2258 2259 /* 2260 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2261 * test is over or because we hit an OOM event. 2262 */ 2263 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2264 { 2265 unsigned long flags; 2266 unsigned long freed = 0; 2267 struct rcu_fwd_cb *rfcp; 2268 2269 for (;;) { 2270 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2271 rfcp = rfp->rcu_fwd_cb_head; 2272 if (!rfcp) { 2273 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2274 break; 2275 } 2276 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2277 if (!rfp->rcu_fwd_cb_head) 2278 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2279 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2280 kfree(rfcp); 2281 freed++; 2282 rcu_torture_fwd_prog_cond_resched(freed); 2283 if (tick_nohz_full_enabled()) { 2284 local_irq_save(flags); 2285 rcu_momentary_dyntick_idle(); 2286 local_irq_restore(flags); 2287 } 2288 } 2289 return freed; 2290 } 2291 2292 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2293 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2294 int *tested, int *tested_tries) 2295 { 2296 unsigned long cver; 2297 unsigned long dur; 2298 struct fwd_cb_state fcs; 2299 unsigned long gps; 2300 int idx; 2301 int sd; 2302 int sd4; 2303 bool selfpropcb = false; 2304 unsigned long stopat; 2305 static DEFINE_TORTURE_RANDOM(trs); 2306 2307 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2308 if (!cur_ops->sync) 2309 return; // Cannot do need_resched() forward progress testing without ->sync. 2310 if (cur_ops->call && cur_ops->cb_barrier) { 2311 init_rcu_head_on_stack(&fcs.rh); 2312 selfpropcb = true; 2313 } 2314 2315 /* Tight loop containing cond_resched(). */ 2316 atomic_inc(&rcu_fwd_cb_nodelay); 2317 cur_ops->sync(); /* Later readers see above write. 
*/ 2318 if (selfpropcb) { 2319 WRITE_ONCE(fcs.stop, 0); 2320 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2321 } 2322 cver = READ_ONCE(rcu_torture_current_version); 2323 gps = cur_ops->get_gp_seq(); 2324 sd = cur_ops->stall_dur() + 1; 2325 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2326 dur = sd4 + torture_random(&trs) % (sd - sd4); 2327 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2328 stopat = rfp->rcu_fwd_startat + dur; 2329 while (time_before(jiffies, stopat) && 2330 !shutdown_time_arrived() && 2331 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2332 idx = cur_ops->readlock(); 2333 udelay(10); 2334 cur_ops->readunlock(idx); 2335 if (!fwd_progress_need_resched || need_resched()) 2336 cond_resched(); 2337 } 2338 (*tested_tries)++; 2339 if (!time_before(jiffies, stopat) && 2340 !shutdown_time_arrived() && 2341 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2342 (*tested)++; 2343 cver = READ_ONCE(rcu_torture_current_version) - cver; 2344 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2345 WARN_ON(!cver && gps < 2); 2346 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__, 2347 rfp->rcu_fwd_id, dur, cver, gps); 2348 } 2349 if (selfpropcb) { 2350 WRITE_ONCE(fcs.stop, 1); 2351 cur_ops->sync(); /* Wait for running CB to complete. */ 2352 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2353 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2354 } 2355 2356 if (selfpropcb) { 2357 WARN_ON(READ_ONCE(fcs.stop) != 2); 2358 destroy_rcu_head_on_stack(&fcs.rh); 2359 } 2360 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2361 atomic_dec(&rcu_fwd_cb_nodelay); 2362 } 2363 2364 /* Carry out call_rcu() forward-progress testing. */ 2365 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2366 { 2367 unsigned long cver; 2368 unsigned long flags; 2369 unsigned long gps; 2370 int i; 2371 long n_launders; 2372 long n_launders_cb_snap; 2373 long n_launders_sa; 2374 long n_max_cbs; 2375 long n_max_gps; 2376 struct rcu_fwd_cb *rfcp; 2377 struct rcu_fwd_cb *rfcpn; 2378 unsigned long stopat; 2379 unsigned long stoppedat; 2380 2381 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2382 if (READ_ONCE(rcu_fwd_emergency_stop)) 2383 return; /* Get out of the way quickly, no GP wait! */ 2384 if (!cur_ops->call) 2385 return; /* Can't do call_rcu() fwd prog without ->call. */ 2386 2387 /* Loop continuously posting RCU callbacks. */ 2388 atomic_inc(&rcu_fwd_cb_nodelay); 2389 cur_ops->sync(); /* Later readers see above write. 
*/ 2390 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2391 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2392 n_launders = 0; 2393 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2394 n_launders_sa = 0; 2395 n_max_cbs = 0; 2396 n_max_gps = 0; 2397 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2398 rfp->n_launders_hist[i].n_launders = 0; 2399 cver = READ_ONCE(rcu_torture_current_version); 2400 gps = cur_ops->get_gp_seq(); 2401 rfp->rcu_launder_gp_seq_start = gps; 2402 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2403 while (time_before(jiffies, stopat) && 2404 !shutdown_time_arrived() && 2405 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2406 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2407 rfcpn = NULL; 2408 if (rfcp) 2409 rfcpn = READ_ONCE(rfcp->rfc_next); 2410 if (rfcpn) { 2411 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2412 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2413 break; 2414 rfp->rcu_fwd_cb_head = rfcpn; 2415 n_launders++; 2416 n_launders_sa++; 2417 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) { 2418 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2419 if (WARN_ON_ONCE(!rfcp)) { 2420 schedule_timeout_interruptible(1); 2421 continue; 2422 } 2423 n_max_cbs++; 2424 n_launders_sa = 0; 2425 rfcp->rfc_gps = 0; 2426 rfcp->rfc_rfp = rfp; 2427 } else { 2428 rfcp = NULL; 2429 } 2430 if (rfcp) 2431 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2432 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2433 if (tick_nohz_full_enabled()) { 2434 local_irq_save(flags); 2435 rcu_momentary_dyntick_idle(); 2436 local_irq_restore(flags); 2437 } 2438 } 2439 stoppedat = jiffies; 2440 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2441 cver = READ_ONCE(rcu_torture_current_version) - cver; 2442 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2443 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id); 2444 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2445 (void)rcu_torture_fwd_prog_cbfree(rfp); 2446 2447 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2448 !shutdown_time_arrived()) { 2449 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2450 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2451 __func__, 2452 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2453 n_launders + n_max_cbs - n_launders_cb_snap, 2454 n_launders, n_launders_sa, 2455 n_max_gps, n_max_cbs, cver, gps); 2456 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs); 2457 mutex_lock(&rcu_fwd_mutex); // Serialize histograms. 2458 rcu_torture_fwd_cb_hist(rfp); 2459 mutex_unlock(&rcu_fwd_mutex); 2460 } 2461 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2462 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2463 atomic_dec(&rcu_fwd_cb_nodelay); 2464 } 2465 2466 2467 /* 2468 * OOM notifier, but this only prints diagnostic information for the 2469 * current forward-progress test. 
2470 */ 2471 static int rcutorture_oom_notify(struct notifier_block *self, 2472 unsigned long notused, void *nfreed) 2473 { 2474 int i; 2475 long ncbs; 2476 struct rcu_fwd *rfp; 2477 2478 mutex_lock(&rcu_fwd_mutex); 2479 rfp = rcu_fwds; 2480 if (!rfp) { 2481 mutex_unlock(&rcu_fwd_mutex); 2482 return NOTIFY_OK; 2483 } 2484 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2485 __func__); 2486 for (i = 0; i < fwd_progress; i++) { 2487 rcu_torture_fwd_cb_hist(&rfp[i]); 2488 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2); 2489 } 2490 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2491 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2492 ncbs = 0; 2493 for (i = 0; i < fwd_progress; i++) 2494 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2495 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2496 rcu_barrier(); 2497 ncbs = 0; 2498 for (i = 0; i < fwd_progress; i++) 2499 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2500 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2501 rcu_barrier(); 2502 ncbs = 0; 2503 for (i = 0; i < fwd_progress; i++) 2504 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]); 2505 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs); 2506 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2507 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2508 pr_info("%s returning after OOM processing.\n", __func__); 2509 mutex_unlock(&rcu_fwd_mutex); 2510 return NOTIFY_OK; 2511 } 2512 2513 static struct notifier_block rcutorture_oom_nb = { 2514 .notifier_call = rcutorture_oom_notify 2515 }; 2516 2517 /* Carry out grace-period forward-progress testing. */ 2518 static int rcu_torture_fwd_prog(void *args) 2519 { 2520 bool firsttime = true; 2521 long max_cbs; 2522 int oldnice = task_nice(current); 2523 unsigned long oldseq = READ_ONCE(rcu_fwd_seq); 2524 struct rcu_fwd *rfp = args; 2525 int tested = 0; 2526 int tested_tries = 0; 2527 2528 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2529 rcu_bind_current_to_nocb(); 2530 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2531 set_user_nice(current, MAX_NICE); 2532 do { 2533 if (!rfp->rcu_fwd_id) { 2534 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2535 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2536 if (!firsttime) { 2537 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0); 2538 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs); 2539 } 2540 firsttime = false; 2541 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1); 2542 } else { 2543 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop()) 2544 schedule_timeout_interruptible(1); 2545 oldseq = READ_ONCE(rcu_fwd_seq); 2546 } 2547 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id); 2548 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id) 2549 rcu_torture_fwd_prog_cr(rfp); 2550 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) && 2551 (!IS_ENABLED(CONFIG_TINY_RCU) || 2552 (rcu_inkernel_boot_has_ended() && 2553 torture_num_online_cpus() > rfp->rcu_fwd_id))) 2554 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2555 2556 /* Avoid slow periods, better to test when busy. */ 2557 if (stutter_wait("rcu_torture_fwd_prog")) 2558 sched_set_normal(current, oldnice); 2559 } while (!torture_must_stop()); 2560 /* Short runs might not contain a valid forward-progress attempt. 
*/ 2561 if (!rfp->rcu_fwd_id) { 2562 WARN_ON(!tested && tested_tries >= 5); 2563 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2564 } 2565 torture_kthread_stopping("rcu_torture_fwd_prog"); 2566 return 0; 2567 } 2568 2569 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2570 static int __init rcu_torture_fwd_prog_init(void) 2571 { 2572 int i; 2573 int ret = 0; 2574 struct rcu_fwd *rfp; 2575 2576 if (!fwd_progress) 2577 return 0; /* Not requested, so don't do it. */ 2578 if (fwd_progress >= nr_cpu_ids) { 2579 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n"); 2580 fwd_progress = nr_cpu_ids; 2581 } else if (fwd_progress < 0) { 2582 fwd_progress = nr_cpu_ids; 2583 } 2584 if ((!cur_ops->sync && !cur_ops->call) || 2585 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) || 2586 cur_ops == &rcu_busted_ops) { 2587 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2588 fwd_progress = 0; 2589 return 0; 2590 } 2591 if (stall_cpu > 0) { 2592 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2593 fwd_progress = 0; 2594 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2595 return -EINVAL; /* In module, can fail back to user. */ 2596 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 2597 return 0; 2598 } 2599 if (fwd_progress_holdoff <= 0) 2600 fwd_progress_holdoff = 1; 2601 if (fwd_progress_div <= 0) 2602 fwd_progress_div = 4; 2603 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL); 2604 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL); 2605 if (!rfp || !fwd_prog_tasks) { 2606 kfree(rfp); 2607 kfree(fwd_prog_tasks); 2608 fwd_prog_tasks = NULL; 2609 fwd_progress = 0; 2610 return -ENOMEM; 2611 } 2612 for (i = 0; i < fwd_progress; i++) { 2613 spin_lock_init(&rfp[i].rcu_fwd_lock); 2614 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head; 2615 rfp[i].rcu_fwd_id = i; 2616 } 2617 mutex_lock(&rcu_fwd_mutex); 2618 rcu_fwds = rfp; 2619 mutex_unlock(&rcu_fwd_mutex); 2620 register_oom_notifier(&rcutorture_oom_nb); 2621 for (i = 0; i < fwd_progress; i++) { 2622 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]); 2623 if (ret) { 2624 fwd_progress = i; 2625 return ret; 2626 } 2627 } 2628 return 0; 2629 } 2630 2631 static void rcu_torture_fwd_prog_cleanup(void) 2632 { 2633 int i; 2634 struct rcu_fwd *rfp; 2635 2636 if (!rcu_fwds || !fwd_prog_tasks) 2637 return; 2638 for (i = 0; i < fwd_progress; i++) 2639 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]); 2640 unregister_oom_notifier(&rcutorture_oom_nb); 2641 mutex_lock(&rcu_fwd_mutex); 2642 rfp = rcu_fwds; 2643 rcu_fwds = NULL; 2644 mutex_unlock(&rcu_fwd_mutex); 2645 kfree(rfp); 2646 kfree(fwd_prog_tasks); 2647 fwd_prog_tasks = NULL; 2648 } 2649 2650 /* Callback function for RCU barrier testing. */ 2651 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2652 { 2653 atomic_inc(&barrier_cbs_invoked); 2654 } 2655 2656 /* IPI handler to get callback posted on desired CPU, if online. */ 2657 static void rcu_torture_barrier1cb(void *rcu_void) 2658 { 2659 struct rcu_head *rhp = rcu_void; 2660 2661 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2662 } 2663 2664 /* kthread function to register callbacks used to test RCU barriers. 
*/ 2665 static int rcu_torture_barrier_cbs(void *arg) 2666 { 2667 long myid = (long)arg; 2668 bool lastphase = false; 2669 bool newphase; 2670 struct rcu_head rcu; 2671 2672 init_rcu_head_on_stack(&rcu); 2673 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2674 set_user_nice(current, MAX_NICE); 2675 do { 2676 wait_event(barrier_cbs_wq[myid], 2677 (newphase = 2678 smp_load_acquire(&barrier_phase)) != lastphase || 2679 torture_must_stop()); 2680 lastphase = newphase; 2681 if (torture_must_stop()) 2682 break; 2683 /* 2684 * The above smp_load_acquire() ensures barrier_phase load 2685 * is ordered before the following ->call(). 2686 */ 2687 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2688 &rcu, 1)) { 2689 // IPI failed, so use direct call from current CPU. 2690 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2691 } 2692 if (atomic_dec_and_test(&barrier_cbs_count)) 2693 wake_up(&barrier_wq); 2694 } while (!torture_must_stop()); 2695 if (cur_ops->cb_barrier != NULL) 2696 cur_ops->cb_barrier(); 2697 destroy_rcu_head_on_stack(&rcu); 2698 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2699 return 0; 2700 } 2701 2702 /* kthread function to drive and coordinate RCU barrier testing. */ 2703 static int rcu_torture_barrier(void *arg) 2704 { 2705 int i; 2706 2707 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2708 do { 2709 atomic_set(&barrier_cbs_invoked, 0); 2710 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2711 /* Ensure barrier_phase ordered after prior assignments. */ 2712 smp_store_release(&barrier_phase, !barrier_phase); 2713 for (i = 0; i < n_barrier_cbs; i++) 2714 wake_up(&barrier_cbs_wq[i]); 2715 wait_event(barrier_wq, 2716 atomic_read(&barrier_cbs_count) == 0 || 2717 torture_must_stop()); 2718 if (torture_must_stop()) 2719 break; 2720 n_barrier_attempts++; 2721 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2722 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2723 n_rcu_torture_barrier_error++; 2724 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2725 atomic_read(&barrier_cbs_invoked), 2726 n_barrier_cbs); 2727 WARN_ON(1); 2728 // Wait manually for the remaining callbacks 2729 i = 0; 2730 do { 2731 if (WARN_ON(i++ > HZ)) 2732 i = INT_MIN; 2733 schedule_timeout_interruptible(1); 2734 cur_ops->cb_barrier(); 2735 } while (atomic_read(&barrier_cbs_invoked) != 2736 n_barrier_cbs && 2737 !torture_must_stop()); 2738 smp_mb(); // Can't trust ordering if broken. 2739 if (!torture_must_stop()) 2740 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2741 atomic_read(&barrier_cbs_invoked)); 2742 } else { 2743 n_barrier_successes++; 2744 } 2745 schedule_timeout_interruptible(HZ / 10); 2746 } while (!torture_must_stop()); 2747 torture_kthread_stopping("rcu_torture_barrier"); 2748 return 0; 2749 } 2750 2751 /* Initialize RCU barrier testing. 
*/ 2752 static int rcu_torture_barrier_init(void) 2753 { 2754 int i; 2755 int ret; 2756 2757 if (n_barrier_cbs <= 0) 2758 return 0; 2759 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2760 pr_alert("%s" TORTURE_FLAG 2761 " Call or barrier ops missing for %s,\n", 2762 torture_type, cur_ops->name); 2763 pr_alert("%s" TORTURE_FLAG 2764 " RCU barrier testing omitted from run.\n", 2765 torture_type); 2766 return 0; 2767 } 2768 atomic_set(&barrier_cbs_count, 0); 2769 atomic_set(&barrier_cbs_invoked, 0); 2770 barrier_cbs_tasks = 2771 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2772 GFP_KERNEL); 2773 barrier_cbs_wq = 2774 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2775 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2776 return -ENOMEM; 2777 for (i = 0; i < n_barrier_cbs; i++) { 2778 init_waitqueue_head(&barrier_cbs_wq[i]); 2779 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2780 (void *)(long)i, 2781 barrier_cbs_tasks[i]); 2782 if (ret) 2783 return ret; 2784 } 2785 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2786 } 2787 2788 /* Clean up after RCU barrier testing. */ 2789 static void rcu_torture_barrier_cleanup(void) 2790 { 2791 int i; 2792 2793 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2794 if (barrier_cbs_tasks != NULL) { 2795 for (i = 0; i < n_barrier_cbs; i++) 2796 torture_stop_kthread(rcu_torture_barrier_cbs, 2797 barrier_cbs_tasks[i]); 2798 kfree(barrier_cbs_tasks); 2799 barrier_cbs_tasks = NULL; 2800 } 2801 if (barrier_cbs_wq != NULL) { 2802 kfree(barrier_cbs_wq); 2803 barrier_cbs_wq = NULL; 2804 } 2805 } 2806 2807 static bool rcu_torture_can_boost(void) 2808 { 2809 static int boost_warn_once; 2810 int prio; 2811 2812 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2813 return false; 2814 if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state) 2815 return false; 2816 2817 prio = rcu_get_gp_kthreads_prio(); 2818 if (!prio) 2819 return false; 2820 2821 if (prio < 2) { 2822 if (boost_warn_once == 1) 2823 return false; 2824 2825 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2826 boost_warn_once = 1; 2827 return false; 2828 } 2829 2830 return true; 2831 } 2832 2833 static bool read_exit_child_stop; 2834 static bool read_exit_child_stopped; 2835 static wait_queue_head_t read_exit_wq; 2836 2837 // Child kthread which just does an rcutorture reader and exits. 2838 static int rcu_torture_read_exit_child(void *trsp_in) 2839 { 2840 struct torture_random_state *trsp = trsp_in; 2841 2842 set_user_nice(current, MAX_NICE); 2843 // Minimize time between reading and exiting. 2844 while (!kthread_should_stop()) 2845 schedule_timeout_uninterruptible(1); 2846 (void)rcu_torture_one_read(trsp, -1); 2847 return 0; 2848 } 2849 2850 // Parent kthread which creates and destroys read-exit child kthreads. 2851 static int rcu_torture_read_exit(void *unused) 2852 { 2853 int count = 0; 2854 bool errexit = false; 2855 int i; 2856 struct task_struct *tsp; 2857 DEFINE_TORTURE_RANDOM(trs); 2858 2859 // Allocate and initialize. 2860 set_user_nice(current, MAX_NICE); 2861 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 2862 2863 // Each pass through this loop does one read-exit episode. 2864 do { 2865 if (++count > read_exit_burst) { 2866 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 2867 rcu_barrier(); // Wait for task_struct free, avoid OOM. 
2868 for (i = 0; i < read_exit_delay; i++) { 2869 schedule_timeout_uninterruptible(HZ); 2870 if (READ_ONCE(read_exit_child_stop)) 2871 break; 2872 } 2873 if (!READ_ONCE(read_exit_child_stop)) 2874 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 2875 count = 0; 2876 } 2877 if (READ_ONCE(read_exit_child_stop)) 2878 break; 2879 // Spawn child. 2880 tsp = kthread_run(rcu_torture_read_exit_child, 2881 &trs, "%s", 2882 "rcu_torture_read_exit_child"); 2883 if (IS_ERR(tsp)) { 2884 TOROUT_ERRSTRING("out of memory"); 2885 errexit = true; 2886 tsp = NULL; 2887 break; 2888 } 2889 cond_resched(); 2890 kthread_stop(tsp); 2891 n_read_exits ++; 2892 stutter_wait("rcu_torture_read_exit"); 2893 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 2894 2895 // Clean up and exit. 2896 smp_store_release(&read_exit_child_stopped, true); // After reaping. 2897 smp_mb(); // Store before wakeup. 2898 wake_up(&read_exit_wq); 2899 while (!torture_must_stop()) 2900 schedule_timeout_uninterruptible(1); 2901 torture_kthread_stopping("rcu_torture_read_exit"); 2902 return 0; 2903 } 2904 2905 static int rcu_torture_read_exit_init(void) 2906 { 2907 if (read_exit_burst <= 0) 2908 return 0; 2909 init_waitqueue_head(&read_exit_wq); 2910 read_exit_child_stop = false; 2911 read_exit_child_stopped = false; 2912 return torture_create_kthread(rcu_torture_read_exit, NULL, 2913 read_exit_task); 2914 } 2915 2916 static void rcu_torture_read_exit_cleanup(void) 2917 { 2918 if (!read_exit_task) 2919 return; 2920 WRITE_ONCE(read_exit_child_stop, true); 2921 smp_mb(); // Above write before wait. 2922 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 2923 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 2924 } 2925 2926 static enum cpuhp_state rcutor_hp; 2927 2928 static void 2929 rcu_torture_cleanup(void) 2930 { 2931 int firsttime; 2932 int flags = 0; 2933 unsigned long gp_seq = 0; 2934 int i; 2935 2936 if (torture_cleanup_begin()) { 2937 if (cur_ops->cb_barrier != NULL) { 2938 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 2939 cur_ops->cb_barrier(); 2940 } 2941 return; 2942 } 2943 if (!cur_ops) { 2944 torture_cleanup_end(); 2945 return; 2946 } 2947 2948 if (cur_ops->gp_kthread_dbg) 2949 cur_ops->gp_kthread_dbg(); 2950 rcu_torture_read_exit_cleanup(); 2951 rcu_torture_barrier_cleanup(); 2952 rcu_torture_fwd_prog_cleanup(); 2953 torture_stop_kthread(rcu_torture_stall, stall_task); 2954 torture_stop_kthread(rcu_torture_writer, writer_task); 2955 2956 if (nocb_tasks) { 2957 for (i = 0; i < nrealnocbers; i++) 2958 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 2959 kfree(nocb_tasks); 2960 nocb_tasks = NULL; 2961 } 2962 2963 if (reader_tasks) { 2964 for (i = 0; i < nrealreaders; i++) 2965 torture_stop_kthread(rcu_torture_reader, 2966 reader_tasks[i]); 2967 kfree(reader_tasks); 2968 reader_tasks = NULL; 2969 } 2970 kfree(rcu_torture_reader_mbchk); 2971 rcu_torture_reader_mbchk = NULL; 2972 2973 if (fakewriter_tasks) { 2974 for (i = 0; i < nfakewriters; i++) 2975 torture_stop_kthread(rcu_torture_fakewriter, 2976 fakewriter_tasks[i]); 2977 kfree(fakewriter_tasks); 2978 fakewriter_tasks = NULL; 2979 } 2980 2981 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2982 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2983 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 2984 cur_ops->name, (long)gp_seq, flags, 2985 rcutorture_seq_diff(gp_seq, start_gp_seq)); 2986 torture_stop_kthread(rcu_torture_stats, stats_task); 2987 
torture_stop_kthread(rcu_torture_fqs, fqs_task); 2988 if (rcu_torture_can_boost() && rcutor_hp >= 0) 2989 cpuhp_remove_state(rcutor_hp); 2990 2991 /* 2992 * Wait for all RCU callbacks to fire, then do torture-type-specific 2993 * cleanup operations. 2994 */ 2995 if (cur_ops->cb_barrier != NULL) { 2996 pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier); 2997 cur_ops->cb_barrier(); 2998 } 2999 if (cur_ops->cleanup != NULL) 3000 cur_ops->cleanup(); 3001 3002 rcu_torture_mem_dump_obj(); 3003 3004 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 3005 3006 if (err_segs_recorded) { 3007 pr_alert("Failure/close-call rcutorture reader segments:\n"); 3008 if (rt_read_nsegs == 0) 3009 pr_alert("\t: No segments recorded!!!\n"); 3010 firsttime = 1; 3011 for (i = 0; i < rt_read_nsegs; i++) { 3012 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 3013 if (err_segs[i].rt_delay_jiffies != 0) { 3014 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 3015 err_segs[i].rt_delay_jiffies); 3016 firsttime = 0; 3017 } 3018 if (err_segs[i].rt_delay_ms != 0) { 3019 pr_cont("%s%ldms", firsttime ? "" : "+", 3020 err_segs[i].rt_delay_ms); 3021 firsttime = 0; 3022 } 3023 if (err_segs[i].rt_delay_us != 0) { 3024 pr_cont("%s%ldus", firsttime ? "" : "+", 3025 err_segs[i].rt_delay_us); 3026 firsttime = 0; 3027 } 3028 pr_cont("%s\n", 3029 err_segs[i].rt_preempted ? "preempted" : ""); 3030 3031 } 3032 } 3033 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 3034 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 3035 else if (torture_onoff_failures()) 3036 rcu_torture_print_module_parms(cur_ops, 3037 "End of test: RCU_HOTPLUG"); 3038 else 3039 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 3040 torture_cleanup_end(); 3041 } 3042 3043 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3044 static void rcu_torture_leak_cb(struct rcu_head *rhp) 3045 { 3046 } 3047 3048 static void rcu_torture_err_cb(struct rcu_head *rhp) 3049 { 3050 /* 3051 * This -might- happen due to race conditions, but is unlikely. 3052 * The scenario that leads to this happening is that the 3053 * first of the pair of duplicate callbacks is queued, 3054 * someone else starts a grace period that includes that 3055 * callback, then the second of the pair must wait for the 3056 * next grace period. Unlikely, but can happen. If it 3057 * does happen, the debug-objects subsystem won't have splatted. 3058 */ 3059 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 3060 } 3061 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3062 3063 /* 3064 * Verify that double-free causes debug-objects to complain, but only 3065 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 3066 * cannot be carried out. 3067 */ 3068 static void rcu_test_debug_objects(void) 3069 { 3070 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 3071 struct rcu_head rh1; 3072 struct rcu_head rh2; 3073 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 3074 3075 init_rcu_head_on_stack(&rh1); 3076 init_rcu_head_on_stack(&rh2); 3077 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 3078 3079 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 3080 preempt_disable(); /* Prevent preemption from interrupting test. */ 3081 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 3082 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 3083 local_irq_disable(); /* Make it harder to start a new grace period. 
*/ 3084 call_rcu(&rh2, rcu_torture_leak_cb); 3085 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 3086 if (rhp) { 3087 call_rcu(rhp, rcu_torture_leak_cb); 3088 call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 3089 } 3090 local_irq_enable(); 3091 rcu_read_unlock(); 3092 preempt_enable(); 3093 3094 /* Wait for them all to get done so we can safely return. */ 3095 rcu_barrier(); 3096 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 3097 destroy_rcu_head_on_stack(&rh1); 3098 destroy_rcu_head_on_stack(&rh2); 3099 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3100 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 3101 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 3102 } 3103 3104 static void rcutorture_sync(void) 3105 { 3106 static unsigned long n; 3107 3108 if (cur_ops->sync && !(++n & 0xfff)) 3109 cur_ops->sync(); 3110 } 3111 3112 static int __init 3113 rcu_torture_init(void) 3114 { 3115 long i; 3116 int cpu; 3117 int firsterr = 0; 3118 int flags = 0; 3119 unsigned long gp_seq = 0; 3120 static struct rcu_torture_ops *torture_ops[] = { 3121 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops, 3122 TASKS_OPS &tasks_rude_ops, TASKS_TRACING_OPS 3123 &trivial_ops, 3124 }; 3125 3126 if (!torture_init_begin(torture_type, verbose)) 3127 return -EBUSY; 3128 3129 /* Process args and tell the world that the torturer is on the job. */ 3130 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 3131 cur_ops = torture_ops[i]; 3132 if (strcmp(torture_type, cur_ops->name) == 0) 3133 break; 3134 } 3135 if (i == ARRAY_SIZE(torture_ops)) { 3136 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 3137 torture_type); 3138 pr_alert("rcu-torture types:"); 3139 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 3140 pr_cont(" %s", torture_ops[i]->name); 3141 pr_cont("\n"); 3142 firsterr = -EINVAL; 3143 cur_ops = NULL; 3144 goto unwind; 3145 } 3146 if (cur_ops->fqs == NULL && fqs_duration != 0) { 3147 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 3148 fqs_duration = 0; 3149 } 3150 if (cur_ops->init) 3151 cur_ops->init(); 3152 3153 if (nreaders >= 0) { 3154 nrealreaders = nreaders; 3155 } else { 3156 nrealreaders = num_online_cpus() - 2 - nreaders; 3157 if (nrealreaders <= 0) 3158 nrealreaders = 1; 3159 } 3160 rcu_torture_print_module_parms(cur_ops, "Start of test"); 3161 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 3162 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 3163 start_gp_seq = gp_seq; 3164 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 3165 cur_ops->name, (long)gp_seq, flags); 3166 3167 /* Set up the freelist. */ 3168 3169 INIT_LIST_HEAD(&rcu_torture_freelist); 3170 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 3171 rcu_tortures[i].rtort_mbtest = 0; 3172 list_add_tail(&rcu_tortures[i].rtort_free, 3173 &rcu_torture_freelist); 3174 } 3175 3176 /* Initialize the statistics so that each run gets its own numbers. 
*/ 3177 3178 rcu_torture_current = NULL; 3179 rcu_torture_current_version = 0; 3180 atomic_set(&n_rcu_torture_alloc, 0); 3181 atomic_set(&n_rcu_torture_alloc_fail, 0); 3182 atomic_set(&n_rcu_torture_free, 0); 3183 atomic_set(&n_rcu_torture_mberror, 0); 3184 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3185 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3186 atomic_set(&n_rcu_torture_error, 0); 3187 n_rcu_torture_barrier_error = 0; 3188 n_rcu_torture_boost_ktrerror = 0; 3189 n_rcu_torture_boost_rterror = 0; 3190 n_rcu_torture_boost_failure = 0; 3191 n_rcu_torture_boosts = 0; 3192 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3193 atomic_set(&rcu_torture_wcount[i], 0); 3194 for_each_possible_cpu(cpu) { 3195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3196 per_cpu(rcu_torture_count, cpu)[i] = 0; 3197 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3198 } 3199 } 3200 err_segs_recorded = 0; 3201 rt_read_nsegs = 0; 3202 3203 /* Start up the kthreads. */ 3204 3205 rcu_torture_write_types(); 3206 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3207 writer_task); 3208 if (torture_init_error(firsterr)) 3209 goto unwind; 3210 if (nfakewriters > 0) { 3211 fakewriter_tasks = kcalloc(nfakewriters, 3212 sizeof(fakewriter_tasks[0]), 3213 GFP_KERNEL); 3214 if (fakewriter_tasks == NULL) { 3215 TOROUT_ERRSTRING("out of memory"); 3216 firsterr = -ENOMEM; 3217 goto unwind; 3218 } 3219 } 3220 for (i = 0; i < nfakewriters; i++) { 3221 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3222 NULL, fakewriter_tasks[i]); 3223 if (torture_init_error(firsterr)) 3224 goto unwind; 3225 } 3226 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3227 GFP_KERNEL); 3228 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3229 GFP_KERNEL); 3230 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3231 TOROUT_ERRSTRING("out of memory"); 3232 firsterr = -ENOMEM; 3233 goto unwind; 3234 } 3235 for (i = 0; i < nrealreaders; i++) { 3236 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3237 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3238 reader_tasks[i]); 3239 if (torture_init_error(firsterr)) 3240 goto unwind; 3241 } 3242 nrealnocbers = nocbs_nthreads; 3243 if (WARN_ON(nrealnocbers < 0)) 3244 nrealnocbers = 1; 3245 if (WARN_ON(nocbs_toggle < 0)) 3246 nocbs_toggle = HZ; 3247 if (nrealnocbers > 0) { 3248 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3249 if (nocb_tasks == NULL) { 3250 TOROUT_ERRSTRING("out of memory"); 3251 firsterr = -ENOMEM; 3252 goto unwind; 3253 } 3254 } else { 3255 nocb_tasks = NULL; 3256 } 3257 for (i = 0; i < nrealnocbers; i++) { 3258 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3259 if (torture_init_error(firsterr)) 3260 goto unwind; 3261 } 3262 if (stat_interval > 0) { 3263 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3264 stats_task); 3265 if (torture_init_error(firsterr)) 3266 goto unwind; 3267 } 3268 if (test_no_idle_hz && shuffle_interval > 0) { 3269 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3270 if (torture_init_error(firsterr)) 3271 goto unwind; 3272 } 3273 if (stutter < 0) 3274 stutter = 0; 3275 if (stutter) { 3276 int t; 3277 3278 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3279 firsterr = torture_stutter_init(stutter * HZ, t); 3280 if (torture_init_error(firsterr)) 3281 goto unwind; 3282 } 3283 if (fqs_duration < 0) 3284 fqs_duration = 0; 3285 if (fqs_duration) { 3286 /* Create the fqs thread */ 3287 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3288 fqs_task); 3289 if (torture_init_error(firsterr)) 3290 goto unwind; 3291 } 3292 if (test_boost_interval < 1) 3293 test_boost_interval = 1; 3294 if (test_boost_duration < 2) 3295 test_boost_duration = 2; 3296 if (rcu_torture_can_boost()) { 3297 3298 boost_starttime = jiffies + test_boost_interval * HZ; 3299 3300 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3301 rcutorture_booster_init, 3302 rcutorture_booster_cleanup); 3303 rcutor_hp = firsterr; 3304 if (torture_init_error(firsterr)) 3305 goto unwind; 3306 3307 // Testing RCU priority boosting requires rcutorture do 3308 // some serious abuse. Counter this by running ksoftirqd 3309 // at higher priority. 3310 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) { 3311 for_each_online_cpu(cpu) { 3312 struct sched_param sp; 3313 struct task_struct *t; 3314 3315 t = per_cpu(ksoftirqd, cpu); 3316 WARN_ON_ONCE(!t); 3317 sp.sched_priority = 2; 3318 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 3319 } 3320 } 3321 } 3322 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3323 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3324 if (torture_init_error(firsterr)) 3325 goto unwind; 3326 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3327 rcutorture_sync); 3328 if (torture_init_error(firsterr)) 3329 goto unwind; 3330 firsterr = rcu_torture_stall_init(); 3331 if (torture_init_error(firsterr)) 3332 goto unwind; 3333 firsterr = rcu_torture_fwd_prog_init(); 3334 if (torture_init_error(firsterr)) 3335 goto unwind; 3336 firsterr = rcu_torture_barrier_init(); 3337 if (torture_init_error(firsterr)) 3338 goto unwind; 3339 firsterr = rcu_torture_read_exit_init(); 3340 if (torture_init_error(firsterr)) 3341 goto unwind; 3342 if (object_debug) 3343 rcu_test_debug_objects(); 3344 torture_init_end(); 3345 return 0; 3346 3347 unwind: 3348 torture_init_end(); 3349 rcu_torture_cleanup(); 3350 if (shutdown_secs) { 3351 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3352 kernel_power_off(); 3353 } 3354 return firsterr; 3355 } 3356 3357 module_init(rcu_torture_init); 3358 module_exit(rcu_torture_cleanup); 3359
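/*
 * Editorial note: the cookie checks in rcu_torture_one_read() exercise the
 * polled grace-period interfaces through cur_ops->get_gp_state() and
 * cur_ops->poll_gp_state().  The sketch below is compiled out and purely
 * illustrative (it is not part of the test, and example_polled_gp_wait()
 * is a made-up name); it shows the same pattern expressed directly in
 * terms of vanilla RCU's polled grace-period API.
 */
#if 0
static void example_polled_gp_wait(void)
{
	unsigned long cookie;

	cookie = get_state_synchronize_rcu();	/* Snapshot current GP state. */
	/* ... updater work that must be followed by a full grace period ... */
	while (!poll_state_synchronize_rcu(cookie))
		schedule_timeout_uninterruptible(1);	/* GP has not yet elapsed. */
	/* All readers that began before the snapshot have now completed. */
}
#endif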