// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
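/*
 * One rt_read_seg for the reader's initial entry, up to
 * RCUTORTURE_RDR_MAX_LOOPS + 1 from the extension loop, and one for the
 * final exit.
 */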
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
"Type of RCU to torture (rcu, srcu, ...)"); 132 133 static int nrealnocbers; 134 static int nrealreaders; 135 static struct task_struct *writer_task; 136 static struct task_struct **fakewriter_tasks; 137 static struct task_struct **reader_tasks; 138 static struct task_struct **nocb_tasks; 139 static struct task_struct *stats_task; 140 static struct task_struct *fqs_task; 141 static struct task_struct *boost_tasks[NR_CPUS]; 142 static struct task_struct *stall_task; 143 static struct task_struct *fwd_prog_task; 144 static struct task_struct **barrier_cbs_tasks; 145 static struct task_struct *barrier_task; 146 static struct task_struct *read_exit_task; 147 148 #define RCU_TORTURE_PIPE_LEN 10 149 150 // Mailbox-like structure to check RCU global memory ordering. 151 struct rcu_torture_reader_check { 152 unsigned long rtc_myloops; 153 int rtc_chkrdr; 154 unsigned long rtc_chkloops; 155 int rtc_ready; 156 struct rcu_torture_reader_check *rtc_assigner; 157 } ____cacheline_internodealigned_in_smp; 158 159 // Update-side data structure used to check RCU readers. 160 struct rcu_torture { 161 struct rcu_head rtort_rcu; 162 int rtort_pipe_count; 163 struct list_head rtort_free; 164 int rtort_mbtest; 165 struct rcu_torture_reader_check *rtort_chkp; 166 }; 167 168 static LIST_HEAD(rcu_torture_freelist); 169 static struct rcu_torture __rcu *rcu_torture_current; 170 static unsigned long rcu_torture_current_version; 171 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 172 static DEFINE_SPINLOCK(rcu_torture_lock); 173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 174 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 175 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 176 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 177 static atomic_t n_rcu_torture_alloc; 178 static atomic_t n_rcu_torture_alloc_fail; 179 static atomic_t n_rcu_torture_free; 180 static atomic_t n_rcu_torture_mberror; 181 static atomic_t n_rcu_torture_mbchk_fail; 182 static atomic_t n_rcu_torture_mbchk_tries; 183 static atomic_t n_rcu_torture_error; 184 static long n_rcu_torture_barrier_error; 185 static long n_rcu_torture_boost_ktrerror; 186 static long n_rcu_torture_boost_rterror; 187 static long n_rcu_torture_boost_failure; 188 static long n_rcu_torture_boosts; 189 static atomic_long_t n_rcu_torture_timers; 190 static long n_barrier_attempts; 191 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 192 static unsigned long n_read_exits; 193 static struct list_head rcu_torture_removed; 194 static unsigned long shutdown_jiffies; 195 static unsigned long start_gp_seq; 196 static atomic_long_t n_nocb_offload; 197 static atomic_long_t n_nocb_deoffload; 198 199 static int rcu_torture_writer_state; 200 #define RTWS_FIXED_DELAY 0 201 #define RTWS_DELAY 1 202 #define RTWS_REPLACE 2 203 #define RTWS_DEF_FREE 3 204 #define RTWS_EXP_SYNC 4 205 #define RTWS_COND_GET 5 206 #define RTWS_COND_SYNC 6 207 #define RTWS_POLL_GET 7 208 #define RTWS_POLL_WAIT 8 209 #define RTWS_SYNC 9 210 #define RTWS_STUTTER 10 211 #define RTWS_STOPPING 11 212 static const char * const rcu_torture_writer_state_names[] = { 213 "RTWS_FIXED_DELAY", 214 "RTWS_DELAY", 215 "RTWS_REPLACE", 216 "RTWS_DEF_FREE", 217 "RTWS_EXP_SYNC", 218 "RTWS_COND_GET", 219 "RTWS_COND_SYNC", 220 "RTWS_POLL_GET", 221 "RTWS_POLL_WAIT", 222 "RTWS_SYNC", 223 "RTWS_STUTTER", 224 "RTWS_STOPPING", 225 }; 226 227 /* Record reader segment types and duration for first failing read. 
/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_PREEMPT_RT)
# define rcu_can_boost() 1
#else
# define rcu_can_boost() 0
#endif

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_gp_state = get_state_synchronize_rcu,
	.start_gp_poll = start_poll_synchronize_rcu,
	.poll_gp_state = poll_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.gp_kthread_dbg = show_rcu_gp_kthreads,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.get_gp_state = srcu_torture_get_gp_state,
	.start_gp_poll = srcu_torture_start_gp_poll,
	.poll_gp_state = srcu_torture_poll_gp_state,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}
static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
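
/* Saved sysctl_sched_rt_runtime value, or -1 if rcutorture has not disabled RT throttling. */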
static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise, the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	static int dbg_done;

	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool firsttime = true;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				cur_ops->call(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				if (!firsttime && !failed)
					failed = rcu_torture_boost_failed(call_rcu_time, jiffies);
				call_rcu_time = jiffies;
				firsttime = false;
			}
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * which case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!firsttime && !failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	while (smp_load_acquire(&rbi.inflight))
		schedule_timeout_uninterruptible(1); // rcu_barrier() deadlocks.

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;
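
// nsynctypes counts how many of these are actually usable with the current
// flavor; it is filled in by rcu_torture_write_types() below.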

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
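
// Roughly: a reader snapshots another reader's loop count and attaches the
// snapshot to the current rcu_torture structure; once the grace period ends,
// a third reader re-reads that loop count and flags an error if it has not
// yet reached the snapshot, which would indicate a memory-ordering violation.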
// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	unsigned long cookie;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		cookie = cur_ops->get_gp_state();
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  rcu_read_lock_trace_held() ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		WARN_ONCE(cur_ops->poll_gp_state(cookie),
			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
			  __func__,
			  rcu_torture_writer_state_getname(),
			  rcu_torture_writer_state,
			  cookie, cur_ops->get_gp_state());
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
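
	/*
	 * Only the first error or close call is recorded: err_segs_recorded
	 * is claimed with xchg() so that later episodes cannot overwrite the
	 * saved sequence of reader protections.
	 */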
	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			torture_hrtimeout_us(500, 1000, &rand);
			lastsleep = jiffies + 10;
		}
		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
 * increase race probabilities and fuzzes the interval between toggling.
 */
static int rcu_nocb_toggle(void *arg)
{
	int cpu;
	int maxcpu = -1;
	int oldnice = task_nice(current);
	long r;
	DEFINE_TORTURE_RANDOM(rand);
	ktime_t toggle_delay;
	unsigned long toggle_fuzz;
	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);

	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
	while (!rcu_inkernel_boot_has_ended())
		schedule_timeout_interruptible(HZ / 10);
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (toggle_interval > ULONG_MAX)
		toggle_fuzz = ULONG_MAX >> 3;
	else
		toggle_fuzz = toggle_interval >> 3;
	if (toggle_fuzz <= 0)
		toggle_fuzz = NSEC_PER_USEC;
	do {
		r = torture_random(&rand);
		cpu = (r >> 4) % (maxcpu + 1);
		if (r & 0x1) {
			rcu_nocb_cpu_offload(cpu);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpu_deoffload(cpu);
			atomic_long_inc(&n_nocb_deoffload);
		}
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		if (stutter_wait("rcu_nocb_toggle"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_nocb_toggle");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
"ver" : "VER", 1763 rcu_torture_current_version, 1764 list_empty(&rcu_torture_freelist), 1765 atomic_read(&n_rcu_torture_alloc), 1766 atomic_read(&n_rcu_torture_alloc_fail), 1767 atomic_read(&n_rcu_torture_free)); 1768 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", 1769 atomic_read(&n_rcu_torture_mberror), 1770 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 1771 n_rcu_torture_barrier_error, 1772 n_rcu_torture_boost_ktrerror, 1773 n_rcu_torture_boost_rterror); 1774 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1775 n_rcu_torture_boost_failure, 1776 n_rcu_torture_boosts, 1777 atomic_long_read(&n_rcu_torture_timers)); 1778 torture_onoff_stats(); 1779 pr_cont("barrier: %ld/%ld:%ld ", 1780 data_race(n_barrier_successes), 1781 data_race(n_barrier_attempts), 1782 data_race(n_rcu_torture_barrier_error)); 1783 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic. 1784 pr_cont("nocb-toggles: %ld:%ld\n", 1785 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); 1786 1787 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1788 if (atomic_read(&n_rcu_torture_mberror) || 1789 atomic_read(&n_rcu_torture_mbchk_fail) || 1790 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1791 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1792 i > 1) { 1793 pr_cont("%s", "!!! "); 1794 atomic_inc(&n_rcu_torture_error); 1795 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1796 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 1797 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1798 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1799 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1800 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?) 1801 WARN_ON_ONCE(i > 1); // Too-short grace period 1802 } 1803 pr_cont("Reader Pipe: "); 1804 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1805 pr_cont(" %ld", pipesummary[i]); 1806 pr_cont("\n"); 1807 1808 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1809 pr_cont("Reader Batch: "); 1810 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1811 pr_cont(" %ld", batchsummary[i]); 1812 pr_cont("\n"); 1813 1814 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1815 pr_cont("Free-Block Circulation: "); 1816 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1817 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1818 } 1819 pr_cont("\n"); 1820 1821 if (cur_ops->stats) 1822 cur_ops->stats(); 1823 if (rtcv_snap == rcu_torture_current_version && 1824 rcu_access_pointer(rcu_torture_current) && 1825 !rcu_stall_is_suppressed()) { 1826 int __maybe_unused flags = 0; 1827 unsigned long __maybe_unused gp_seq = 0; 1828 1829 rcutorture_get_gp_data(cur_ops->ttype, 1830 &flags, &gp_seq); 1831 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1832 &flags, &gp_seq); 1833 wtp = READ_ONCE(writer_task); 1834 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1835 rcu_torture_writer_state_getname(), 1836 rcu_torture_writer_state, gp_seq, flags, 1837 wtp == NULL ? ~0UL : wtp->state, 1838 wtp == NULL ? 
-1 : (int)task_cpu(wtp)); 1839 if (!splatted && wtp) { 1840 sched_show_task(wtp); 1841 splatted = true; 1842 } 1843 if (cur_ops->gp_kthread_dbg) 1844 cur_ops->gp_kthread_dbg(); 1845 rcu_ftrace_dump(DUMP_ALL); 1846 } 1847 rtcv_snap = rcu_torture_current_version; 1848 } 1849 1850 /* 1851 * Periodically prints torture statistics, if periodic statistics printing 1852 * was specified via the stat_interval module parameter. 1853 */ 1854 static int 1855 rcu_torture_stats(void *arg) 1856 { 1857 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1858 do { 1859 schedule_timeout_interruptible(stat_interval * HZ); 1860 rcu_torture_stats_print(); 1861 torture_shutdown_absorb("rcu_torture_stats"); 1862 } while (!torture_must_stop()); 1863 torture_kthread_stopping("rcu_torture_stats"); 1864 1865 { 1866 struct rcu_head *rhp; 1867 struct kmem_cache *kcp; 1868 static int z; 1869 1870 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL); 1871 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); 1872 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); 1873 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):"); 1874 mem_dump_obj(ZERO_SIZE_PTR); 1875 pr_alert("mem_dump_obj(NULL):"); 1876 mem_dump_obj(NULL); 1877 pr_alert("mem_dump_obj(%px):", &rhp); 1878 mem_dump_obj(&rhp); 1879 pr_alert("mem_dump_obj(%px):", rhp); 1880 mem_dump_obj(rhp); 1881 pr_alert("mem_dump_obj(%px):", &rhp->func); 1882 mem_dump_obj(&rhp->func); 1883 pr_alert("mem_dump_obj(%px):", &z); 1884 mem_dump_obj(&z); 1885 kmem_cache_free(kcp, rhp); 1886 kmem_cache_destroy(kcp); 1887 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 1888 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 1889 pr_alert("mem_dump_obj(kmalloc %px):", rhp); 1890 mem_dump_obj(rhp); 1891 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func); 1892 mem_dump_obj(&rhp->func); 1893 kfree(rhp); 1894 rhp = vmalloc(4096); 1895 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp); 1896 pr_alert("mem_dump_obj(vmalloc %px):", rhp); 1897 mem_dump_obj(rhp); 1898 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func); 1899 mem_dump_obj(&rhp->func); 1900 vfree(rhp); 1901 } 1902 1903 return 0; 1904 } 1905 1906 static void 1907 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1908 { 1909 pr_alert("%s" TORTURE_FLAG 1910 "--- %s: nreaders=%d nfakewriters=%d " 1911 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1912 "shuffle_interval=%d stutter=%d irqreader=%d " 1913 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1914 "test_boost=%d/%d test_boost_interval=%d " 1915 "test_boost_duration=%d shutdown_secs=%d " 1916 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1917 "stall_cpu_block=%d " 1918 "n_barrier_cbs=%d " 1919 "onoff_interval=%d onoff_holdoff=%d " 1920 "read_exit_delay=%d read_exit_burst=%d " 1921 "nocbs_nthreads=%d nocbs_toggle=%d\n", 1922 torture_type, tag, nrealreaders, nfakewriters, 1923 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1924 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1925 test_boost, cur_ops->can_boost, 1926 test_boost_interval, test_boost_duration, shutdown_secs, 1927 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1928 stall_cpu_block, 1929 n_barrier_cbs, 1930 onoff_interval, onoff_holdoff, 1931 read_exit_delay, read_exit_burst, 1932 nocbs_nthreads, nocbs_toggle); 1933 } 1934 1935 static int 
rcutorture_booster_cleanup(unsigned int cpu) 1936 { 1937 struct task_struct *t; 1938 1939 if (boost_tasks[cpu] == NULL) 1940 return 0; 1941 mutex_lock(&boost_mutex); 1942 t = boost_tasks[cpu]; 1943 boost_tasks[cpu] = NULL; 1944 rcu_torture_enable_rt_throttle(); 1945 mutex_unlock(&boost_mutex); 1946 1947 /* This must be outside of the mutex, otherwise deadlock! */ 1948 torture_stop_kthread(rcu_torture_boost, t); 1949 return 0; 1950 } 1951 1952 static int rcutorture_booster_init(unsigned int cpu) 1953 { 1954 int retval; 1955 1956 if (boost_tasks[cpu] != NULL) 1957 return 0; /* Already created, nothing more to do. */ 1958 1959 /* Don't allow time recalculation while creating a new task. */ 1960 mutex_lock(&boost_mutex); 1961 rcu_torture_disable_rt_throttle(); 1962 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1963 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1964 cpu_to_node(cpu), 1965 "rcu_torture_boost"); 1966 if (IS_ERR(boost_tasks[cpu])) { 1967 retval = PTR_ERR(boost_tasks[cpu]); 1968 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 1969 n_rcu_torture_boost_ktrerror++; 1970 boost_tasks[cpu] = NULL; 1971 mutex_unlock(&boost_mutex); 1972 return retval; 1973 } 1974 kthread_bind(boost_tasks[cpu], cpu); 1975 wake_up_process(boost_tasks[cpu]); 1976 mutex_unlock(&boost_mutex); 1977 return 0; 1978 } 1979 1980 /* 1981 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1982 * induces a CPU stall for the time specified by stall_cpu. 1983 */ 1984 static int rcu_torture_stall(void *args) 1985 { 1986 int idx; 1987 unsigned long stop_at; 1988 1989 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1990 if (stall_cpu_holdoff > 0) { 1991 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1992 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1993 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1994 } 1995 if (!kthread_should_stop() && stall_gp_kthread > 0) { 1996 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 1997 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 1998 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 1999 if (kthread_should_stop()) 2000 break; 2001 schedule_timeout_uninterruptible(HZ); 2002 } 2003 } 2004 if (!kthread_should_stop() && stall_cpu > 0) { 2005 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 2006 stop_at = ktime_get_seconds() + stall_cpu; 2007 /* RCU CPU stall is expected behavior in following code. */ 2008 idx = cur_ops->readlock(); 2009 if (stall_cpu_irqsoff) 2010 local_irq_disable(); 2011 else if (!stall_cpu_block) 2012 preempt_disable(); 2013 pr_alert("%s start on CPU %d.\n", 2014 __func__, raw_smp_processor_id()); 2015 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 2016 stop_at)) 2017 if (stall_cpu_block) 2018 schedule_timeout_uninterruptible(HZ); 2019 if (stall_cpu_irqsoff) 2020 local_irq_enable(); 2021 else if (!stall_cpu_block) 2022 preempt_enable(); 2023 cur_ops->readunlock(idx); 2024 } 2025 pr_alert("%s end.\n", __func__); 2026 torture_shutdown_absorb("rcu_torture_stall"); 2027 while (!kthread_should_stop()) 2028 schedule_timeout_interruptible(10 * HZ); 2029 return 0; 2030 } 2031 2032 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 2033 static int __init rcu_torture_stall_init(void) 2034 { 2035 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 2036 return 0; 2037 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 2038 } 2039 2040 /* State structure for forward-progress self-propagating RCU callback. 
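The ->stop field forms a small handshake: zero means keep re-posting, the test writes one to request a stop, and the callback writes two to acknowledge that it has stopped.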
*/ 2041 struct fwd_cb_state { 2042 struct rcu_head rh; 2043 int stop; 2044 }; 2045 2046 /* 2047 * Forward-progress self-propagating RCU callback function. Because 2048 * callbacks run from softirq, this function is an implicit RCU read-side 2049 * critical section. 2050 */ 2051 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 2052 { 2053 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 2054 2055 if (READ_ONCE(fcsp->stop)) { 2056 WRITE_ONCE(fcsp->stop, 2); 2057 return; 2058 } 2059 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 2060 } 2061 2062 /* State for continuous-flood RCU callbacks. */ 2063 struct rcu_fwd_cb { 2064 struct rcu_head rh; 2065 struct rcu_fwd_cb *rfc_next; 2066 struct rcu_fwd *rfc_rfp; 2067 int rfc_gps; 2068 }; 2069 2070 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 2071 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 2072 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 2073 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 2074 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 2075 2076 struct rcu_launder_hist { 2077 long n_launders; 2078 unsigned long launder_gp_seq; 2079 }; 2080 2081 struct rcu_fwd { 2082 spinlock_t rcu_fwd_lock; 2083 struct rcu_fwd_cb *rcu_fwd_cb_head; 2084 struct rcu_fwd_cb **rcu_fwd_cb_tail; 2085 long n_launders_cb; 2086 unsigned long rcu_fwd_startat; 2087 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 2088 unsigned long rcu_launder_gp_seq_start; 2089 }; 2090 2091 static DEFINE_MUTEX(rcu_fwd_mutex); 2092 static struct rcu_fwd *rcu_fwds; 2093 static bool rcu_fwd_emergency_stop; 2094 2095 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2096 { 2097 unsigned long gps; 2098 unsigned long gps_old; 2099 int i; 2100 int j; 2101 2102 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2103 if (rfp->n_launders_hist[i].n_launders > 0) 2104 break; 2105 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 2106 __func__, jiffies - rfp->rcu_fwd_startat); 2107 gps_old = rfp->rcu_launder_gp_seq_start; 2108 for (j = 0; j <= i; j++) { 2109 gps = rfp->n_launders_hist[j].launder_gp_seq; 2110 pr_cont(" %ds/%d: %ld:%ld", 2111 j + 1, FWD_CBS_HIST_DIV, 2112 rfp->n_launders_hist[j].n_launders, 2113 rcutorture_seq_diff(gps, gps_old)); 2114 gps_old = gps; 2115 } 2116 pr_cont("\n"); 2117 } 2118 2119 /* Callback function for continuous-flood RCU callbacks. */ 2120 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2121 { 2122 unsigned long flags; 2123 int i; 2124 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2125 struct rcu_fwd_cb **rfcpp; 2126 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2127 2128 rfcp->rfc_next = NULL; 2129 rfcp->rfc_gps++; 2130 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2131 rfcpp = rfp->rcu_fwd_cb_tail; 2132 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2133 WRITE_ONCE(*rfcpp, rfcp); 2134 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2135 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2136 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2137 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2138 rfp->n_launders_hist[i].n_launders++; 2139 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2140 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2141 } 2142 2143 // Give the scheduler a chance, even on nohz_full CPUs. 
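// With both CONFIG_PREEMPTION and CONFIG_NO_HZ_FULL enabled, schedule() is
// invoked on all but every 4096th iteration (and whenever need_resched() is
// set) to emulate the returns to userspace that a real call_rcu() flood would
// experience; otherwise a plain cond_resched() suffices.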
2144 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2145 { 2146 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2147 // Real call_rcu() floods hit userspace, so emulate that. 2148 if (need_resched() || (iter & 0xfff)) 2149 schedule(); 2150 return; 2151 } 2152 // No userspace emulation: CB invocation throttles call_rcu() 2153 cond_resched(); 2154 } 2155 2156 /* 2157 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2158 * test is over or because we hit an OOM event. 2159 */ 2160 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2161 { 2162 unsigned long flags; 2163 unsigned long freed = 0; 2164 struct rcu_fwd_cb *rfcp; 2165 2166 for (;;) { 2167 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2168 rfcp = rfp->rcu_fwd_cb_head; 2169 if (!rfcp) { 2170 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2171 break; 2172 } 2173 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2174 if (!rfp->rcu_fwd_cb_head) 2175 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2176 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2177 kfree(rfcp); 2178 freed++; 2179 rcu_torture_fwd_prog_cond_resched(freed); 2180 if (tick_nohz_full_enabled()) { 2181 local_irq_save(flags); 2182 rcu_momentary_dyntick_idle(); 2183 local_irq_restore(flags); 2184 } 2185 } 2186 return freed; 2187 } 2188 2189 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2190 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2191 int *tested, int *tested_tries) 2192 { 2193 unsigned long cver; 2194 unsigned long dur; 2195 struct fwd_cb_state fcs; 2196 unsigned long gps; 2197 int idx; 2198 int sd; 2199 int sd4; 2200 bool selfpropcb = false; 2201 unsigned long stopat; 2202 static DEFINE_TORTURE_RANDOM(trs); 2203 2204 if (!cur_ops->sync) 2205 return; // Cannot do need_resched() forward progress testing without ->sync. 2206 if (cur_ops->call && cur_ops->cb_barrier) { 2207 init_rcu_head_on_stack(&fcs.rh); 2208 selfpropcb = true; 2209 } 2210 2211 /* Tight loop containing cond_resched(). */ 2212 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 2213 cur_ops->sync(); /* Later readers see above write. */ 2214 if (selfpropcb) { 2215 WRITE_ONCE(fcs.stop, 0); 2216 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2217 } 2218 cver = READ_ONCE(rcu_torture_current_version); 2219 gps = cur_ops->get_gp_seq(); 2220 sd = cur_ops->stall_dur() + 1; 2221 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2222 dur = sd4 + torture_random(&trs) % (sd - sd4); 2223 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2224 stopat = rfp->rcu_fwd_startat + dur; 2225 while (time_before(jiffies, stopat) && 2226 !shutdown_time_arrived() && 2227 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2228 idx = cur_ops->readlock(); 2229 udelay(10); 2230 cur_ops->readunlock(idx); 2231 if (!fwd_progress_need_resched || need_resched()) 2232 cond_resched(); 2233 } 2234 (*tested_tries)++; 2235 if (!time_before(jiffies, stopat) && 2236 !shutdown_time_arrived() && 2237 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2238 (*tested)++; 2239 cver = READ_ONCE(rcu_torture_current_version) - cver; 2240 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2241 WARN_ON(!cver && gps < 2); 2242 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 2243 } 2244 if (selfpropcb) { 2245 WRITE_ONCE(fcs.stop, 1); 2246 cur_ops->sync(); /* Wait for running CB to complete. */ 2247 cur_ops->cb_barrier(); /* Wait for queued callbacks. 
*/ 2248 } 2249 2250 if (selfpropcb) { 2251 WARN_ON(READ_ONCE(fcs.stop) != 2); 2252 destroy_rcu_head_on_stack(&fcs.rh); 2253 } 2254 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2255 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2256 } 2257 2258 /* Carry out call_rcu() forward-progress testing. */ 2259 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2260 { 2261 unsigned long cver; 2262 unsigned long flags; 2263 unsigned long gps; 2264 int i; 2265 long n_launders; 2266 long n_launders_cb_snap; 2267 long n_launders_sa; 2268 long n_max_cbs; 2269 long n_max_gps; 2270 struct rcu_fwd_cb *rfcp; 2271 struct rcu_fwd_cb *rfcpn; 2272 unsigned long stopat; 2273 unsigned long stoppedat; 2274 2275 if (READ_ONCE(rcu_fwd_emergency_stop)) 2276 return; /* Get out of the way quickly, no GP wait! */ 2277 if (!cur_ops->call) 2278 return; /* Can't do call_rcu() fwd prog without ->call. */ 2279 2280 /* Loop continuously posting RCU callbacks. */ 2281 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 2282 cur_ops->sync(); /* Later readers see above write. */ 2283 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2284 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2285 n_launders = 0; 2286 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2287 n_launders_sa = 0; 2288 n_max_cbs = 0; 2289 n_max_gps = 0; 2290 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2291 rfp->n_launders_hist[i].n_launders = 0; 2292 cver = READ_ONCE(rcu_torture_current_version); 2293 gps = cur_ops->get_gp_seq(); 2294 rfp->rcu_launder_gp_seq_start = gps; 2295 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2296 while (time_before(jiffies, stopat) && 2297 !shutdown_time_arrived() && 2298 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2299 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2300 rfcpn = NULL; 2301 if (rfcp) 2302 rfcpn = READ_ONCE(rfcp->rfc_next); 2303 if (rfcpn) { 2304 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2305 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2306 break; 2307 rfp->rcu_fwd_cb_head = rfcpn; 2308 n_launders++; 2309 n_launders_sa++; 2310 } else { 2311 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2312 if (WARN_ON_ONCE(!rfcp)) { 2313 schedule_timeout_interruptible(1); 2314 continue; 2315 } 2316 n_max_cbs++; 2317 n_launders_sa = 0; 2318 rfcp->rfc_gps = 0; 2319 rfcp->rfc_rfp = rfp; 2320 } 2321 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2322 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2323 if (tick_nohz_full_enabled()) { 2324 local_irq_save(flags); 2325 rcu_momentary_dyntick_idle(); 2326 local_irq_restore(flags); 2327 } 2328 } 2329 stoppedat = jiffies; 2330 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2331 cver = READ_ONCE(rcu_torture_current_version) - cver; 2332 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2333 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2334 (void)rcu_torture_fwd_prog_cbfree(rfp); 2335 2336 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2337 !shutdown_time_arrived()) { 2338 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2339 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2340 __func__, 2341 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2342 n_launders + n_max_cbs - n_launders_cb_snap, 2343 n_launders, n_launders_sa, 2344 n_max_gps, n_max_cbs, cver, gps); 2345 rcu_torture_fwd_cb_hist(rfp); 2346 } 2347 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. 
*/ 2348 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2349 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2350 } 2351 2352 2353 /* 2354 * OOM notifier, but this only prints diagnostic information for the 2355 * current forward-progress test. 2356 */ 2357 static int rcutorture_oom_notify(struct notifier_block *self, 2358 unsigned long notused, void *nfreed) 2359 { 2360 struct rcu_fwd *rfp; 2361 2362 mutex_lock(&rcu_fwd_mutex); 2363 rfp = rcu_fwds; 2364 if (!rfp) { 2365 mutex_unlock(&rcu_fwd_mutex); 2366 return NOTIFY_OK; 2367 } 2368 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2369 __func__); 2370 rcu_torture_fwd_cb_hist(rfp); 2371 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2372 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2373 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2374 pr_info("%s: Freed %lu RCU callbacks.\n", 2375 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2376 rcu_barrier(); 2377 pr_info("%s: Freed %lu RCU callbacks.\n", 2378 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2379 rcu_barrier(); 2380 pr_info("%s: Freed %lu RCU callbacks.\n", 2381 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2382 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2383 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2384 pr_info("%s returning after OOM processing.\n", __func__); 2385 mutex_unlock(&rcu_fwd_mutex); 2386 return NOTIFY_OK; 2387 } 2388 2389 static struct notifier_block rcutorture_oom_nb = { 2390 .notifier_call = rcutorture_oom_notify 2391 }; 2392 2393 /* Carry out grace-period forward-progress testing. */ 2394 static int rcu_torture_fwd_prog(void *args) 2395 { 2396 int oldnice = task_nice(current); 2397 struct rcu_fwd *rfp = args; 2398 int tested = 0; 2399 int tested_tries = 0; 2400 2401 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2402 rcu_bind_current_to_nocb(); 2403 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2404 set_user_nice(current, MAX_NICE); 2405 do { 2406 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2407 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2408 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2409 rcu_inkernel_boot_has_ended()) 2410 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2411 if (rcu_inkernel_boot_has_ended()) 2412 rcu_torture_fwd_prog_cr(rfp); 2413 2414 /* Avoid slow periods, better to test when busy. */ 2415 if (stutter_wait("rcu_torture_fwd_prog")) 2416 sched_set_normal(current, oldnice); 2417 } while (!torture_must_stop()); 2418 /* Short runs might not contain a valid forward-progress attempt. */ 2419 WARN_ON(!tested && tested_tries >= 5); 2420 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2421 torture_kthread_stopping("rcu_torture_fwd_prog"); 2422 return 0; 2423 } 2424 2425 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2426 static int __init rcu_torture_fwd_prog_init(void) 2427 { 2428 struct rcu_fwd *rfp; 2429 2430 if (!fwd_progress) 2431 return 0; /* Not requested, so don't do it. 
*/ 2432 if ((!cur_ops->sync && !cur_ops->call) || 2433 !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) { 2434 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2435 return 0; 2436 } 2437 if (stall_cpu > 0) { 2438 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2439 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2440 return -EINVAL; /* In module, can fail back to user. */ 2441 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 2442 return 0; 2443 } 2444 if (fwd_progress_holdoff <= 0) 2445 fwd_progress_holdoff = 1; 2446 if (fwd_progress_div <= 0) 2447 fwd_progress_div = 4; 2448 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2449 if (!rfp) 2450 return -ENOMEM; 2451 spin_lock_init(&rfp->rcu_fwd_lock); 2452 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2453 mutex_lock(&rcu_fwd_mutex); 2454 rcu_fwds = rfp; 2455 mutex_unlock(&rcu_fwd_mutex); 2456 register_oom_notifier(&rcutorture_oom_nb); 2457 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2458 } 2459 2460 static void rcu_torture_fwd_prog_cleanup(void) 2461 { 2462 struct rcu_fwd *rfp; 2463 2464 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2465 rfp = rcu_fwds; 2466 mutex_lock(&rcu_fwd_mutex); 2467 rcu_fwds = NULL; 2468 mutex_unlock(&rcu_fwd_mutex); 2469 unregister_oom_notifier(&rcutorture_oom_nb); 2470 kfree(rfp); 2471 } 2472 2473 /* Callback function for RCU barrier testing. */ 2474 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2475 { 2476 atomic_inc(&barrier_cbs_invoked); 2477 } 2478 2479 /* IPI handler to get callback posted on desired CPU, if online. */ 2480 static void rcu_torture_barrier1cb(void *rcu_void) 2481 { 2482 struct rcu_head *rhp = rcu_void; 2483 2484 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2485 } 2486 2487 /* kthread function to register callbacks used to test RCU barriers. */ 2488 static int rcu_torture_barrier_cbs(void *arg) 2489 { 2490 long myid = (long)arg; 2491 bool lastphase = false; 2492 bool newphase; 2493 struct rcu_head rcu; 2494 2495 init_rcu_head_on_stack(&rcu); 2496 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2497 set_user_nice(current, MAX_NICE); 2498 do { 2499 wait_event(barrier_cbs_wq[myid], 2500 (newphase = 2501 smp_load_acquire(&barrier_phase)) != lastphase || 2502 torture_must_stop()); 2503 lastphase = newphase; 2504 if (torture_must_stop()) 2505 break; 2506 /* 2507 * The above smp_load_acquire() ensures barrier_phase load 2508 * is ordered before the following ->call(). 2509 */ 2510 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2511 &rcu, 1)) { 2512 // IPI failed, so use direct call from current CPU. 2513 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2514 } 2515 if (atomic_dec_and_test(&barrier_cbs_count)) 2516 wake_up(&barrier_wq); 2517 } while (!torture_must_stop()); 2518 if (cur_ops->cb_barrier != NULL) 2519 cur_ops->cb_barrier(); 2520 destroy_rcu_head_on_stack(&rcu); 2521 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2522 return 0; 2523 } 2524 2525 /* kthread function to drive and coordinate RCU barrier testing. */ 2526 static int rcu_torture_barrier(void *arg) 2527 { 2528 int i; 2529 2530 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2531 do { 2532 atomic_set(&barrier_cbs_invoked, 0); 2533 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2534 /* Ensure barrier_phase ordered after prior assignments.
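The smp_store_release() below pairs with the smp_load_acquire() in rcu_torture_barrier_cbs(), so each callback-posting kthread sees the counters reset above before it acts on the new phase.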
*/ 2535 smp_store_release(&barrier_phase, !barrier_phase); 2536 for (i = 0; i < n_barrier_cbs; i++) 2537 wake_up(&barrier_cbs_wq[i]); 2538 wait_event(barrier_wq, 2539 atomic_read(&barrier_cbs_count) == 0 || 2540 torture_must_stop()); 2541 if (torture_must_stop()) 2542 break; 2543 n_barrier_attempts++; 2544 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2545 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2546 n_rcu_torture_barrier_error++; 2547 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2548 atomic_read(&barrier_cbs_invoked), 2549 n_barrier_cbs); 2550 WARN_ON(1); 2551 // Wait manually for the remaining callbacks 2552 i = 0; 2553 do { 2554 if (WARN_ON(i++ > HZ)) 2555 i = INT_MIN; 2556 schedule_timeout_interruptible(1); 2557 cur_ops->cb_barrier(); 2558 } while (atomic_read(&barrier_cbs_invoked) != 2559 n_barrier_cbs && 2560 !torture_must_stop()); 2561 smp_mb(); // Can't trust ordering if broken. 2562 if (!torture_must_stop()) 2563 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2564 atomic_read(&barrier_cbs_invoked)); 2565 } else { 2566 n_barrier_successes++; 2567 } 2568 schedule_timeout_interruptible(HZ / 10); 2569 } while (!torture_must_stop()); 2570 torture_kthread_stopping("rcu_torture_barrier"); 2571 return 0; 2572 } 2573 2574 /* Initialize RCU barrier testing. */ 2575 static int rcu_torture_barrier_init(void) 2576 { 2577 int i; 2578 int ret; 2579 2580 if (n_barrier_cbs <= 0) 2581 return 0; 2582 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2583 pr_alert("%s" TORTURE_FLAG 2584 " Call or barrier ops missing for %s,\n", 2585 torture_type, cur_ops->name); 2586 pr_alert("%s" TORTURE_FLAG 2587 " RCU barrier testing omitted from run.\n", 2588 torture_type); 2589 return 0; 2590 } 2591 atomic_set(&barrier_cbs_count, 0); 2592 atomic_set(&barrier_cbs_invoked, 0); 2593 barrier_cbs_tasks = 2594 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2595 GFP_KERNEL); 2596 barrier_cbs_wq = 2597 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2598 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2599 return -ENOMEM; 2600 for (i = 0; i < n_barrier_cbs; i++) { 2601 init_waitqueue_head(&barrier_cbs_wq[i]); 2602 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2603 (void *)(long)i, 2604 barrier_cbs_tasks[i]); 2605 if (ret) 2606 return ret; 2607 } 2608 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2609 } 2610 2611 /* Clean up after RCU barrier testing. */ 2612 static void rcu_torture_barrier_cleanup(void) 2613 { 2614 int i; 2615 2616 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2617 if (barrier_cbs_tasks != NULL) { 2618 for (i = 0; i < n_barrier_cbs; i++) 2619 torture_stop_kthread(rcu_torture_barrier_cbs, 2620 barrier_cbs_tasks[i]); 2621 kfree(barrier_cbs_tasks); 2622 barrier_cbs_tasks = NULL; 2623 } 2624 if (barrier_cbs_wq != NULL) { 2625 kfree(barrier_cbs_wq); 2626 barrier_cbs_wq = NULL; 2627 } 2628 } 2629 2630 static bool rcu_torture_can_boost(void) 2631 { 2632 static int boost_warn_once; 2633 int prio; 2634 2635 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2636 return false; 2637 if (!cur_ops->call) 2638 return false; 2639 2640 prio = rcu_get_gp_kthreads_prio(); 2641 if (!prio) 2642 return false; 2643 2644 if (prio < 2) { 2645 if (boost_warn_once == 1) 2646 return false; 2647 2648 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2649 boost_warn_once = 1; 2650 return false; 2651 } 2652 2653 return true; 2654 } 2655 2656 static bool read_exit_child_stop; 2657 static bool read_exit_child_stopped; 2658 static wait_queue_head_t read_exit_wq; 2659 2660 // Child kthread which just does an rcutorture reader and exits. 2661 static int rcu_torture_read_exit_child(void *trsp_in) 2662 { 2663 struct torture_random_state *trsp = trsp_in; 2664 2665 set_user_nice(current, MAX_NICE); 2666 // Minimize time between reading and exiting. 2667 while (!kthread_should_stop()) 2668 schedule_timeout_uninterruptible(1); 2669 (void)rcu_torture_one_read(trsp, -1); 2670 return 0; 2671 } 2672 2673 // Parent kthread which creates and destroys read-exit child kthreads. 2674 static int rcu_torture_read_exit(void *unused) 2675 { 2676 int count = 0; 2677 bool errexit = false; 2678 int i; 2679 struct task_struct *tsp; 2680 DEFINE_TORTURE_RANDOM(trs); 2681 2682 // Allocate and initialize. 2683 set_user_nice(current, MAX_NICE); 2684 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 2685 2686 // Each pass through this loop does one read-exit episode. 2687 do { 2688 if (++count > read_exit_burst) { 2689 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 2690 rcu_barrier(); // Wait for task_struct free, avoid OOM. 2691 for (i = 0; i < read_exit_delay; i++) { 2692 schedule_timeout_uninterruptible(HZ); 2693 if (READ_ONCE(read_exit_child_stop)) 2694 break; 2695 } 2696 if (!READ_ONCE(read_exit_child_stop)) 2697 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 2698 count = 0; 2699 } 2700 if (READ_ONCE(read_exit_child_stop)) 2701 break; 2702 // Spawn child. 2703 tsp = kthread_run(rcu_torture_read_exit_child, 2704 &trs, "%s", 2705 "rcu_torture_read_exit_child"); 2706 if (IS_ERR(tsp)) { 2707 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2708 errexit = true; 2709 tsp = NULL; 2710 break; 2711 } 2712 cond_resched(); 2713 kthread_stop(tsp); 2714 n_read_exits ++; 2715 stutter_wait("rcu_torture_read_exit"); 2716 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 2717 2718 // Clean up and exit. 2719 smp_store_release(&read_exit_child_stopped, true); // After reaping. 2720 smp_mb(); // Store before wakeup. 2721 wake_up(&read_exit_wq); 2722 while (!torture_must_stop()) 2723 schedule_timeout_uninterruptible(1); 2724 torture_kthread_stopping("rcu_torture_read_exit"); 2725 return 0; 2726 } 2727 2728 static int rcu_torture_read_exit_init(void) 2729 { 2730 if (read_exit_burst <= 0) 2731 return -EINVAL; 2732 init_waitqueue_head(&read_exit_wq); 2733 read_exit_child_stop = false; 2734 read_exit_child_stopped = false; 2735 return torture_create_kthread(rcu_torture_read_exit, NULL, 2736 read_exit_task); 2737 } 2738 2739 static void rcu_torture_read_exit_cleanup(void) 2740 { 2741 if (!read_exit_task) 2742 return; 2743 WRITE_ONCE(read_exit_child_stop, true); 2744 smp_mb(); // Above write before wait. 
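/* Wait for the read-exit parent kthread to reap its final child and announce that it has stopped. */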
2745 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 2746 torture_stop_kthread(rcu_torture_read_exit, read_exit_task); 2747 } 2748 2749 static enum cpuhp_state rcutor_hp; 2750 2751 static void 2752 rcu_torture_cleanup(void) 2753 { 2754 int firsttime; 2755 int flags = 0; 2756 unsigned long gp_seq = 0; 2757 int i; 2758 2759 if (torture_cleanup_begin()) { 2760 if (cur_ops->cb_barrier != NULL) 2761 cur_ops->cb_barrier(); 2762 return; 2763 } 2764 if (!cur_ops) { 2765 torture_cleanup_end(); 2766 return; 2767 } 2768 2769 if (cur_ops->gp_kthread_dbg) 2770 cur_ops->gp_kthread_dbg(); 2771 rcu_torture_read_exit_cleanup(); 2772 rcu_torture_barrier_cleanup(); 2773 rcu_torture_fwd_prog_cleanup(); 2774 torture_stop_kthread(rcu_torture_stall, stall_task); 2775 torture_stop_kthread(rcu_torture_writer, writer_task); 2776 2777 if (nocb_tasks) { 2778 for (i = 0; i < nrealnocbers; i++) 2779 torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]); 2780 kfree(nocb_tasks); 2781 nocb_tasks = NULL; 2782 } 2783 2784 if (reader_tasks) { 2785 for (i = 0; i < nrealreaders; i++) 2786 torture_stop_kthread(rcu_torture_reader, 2787 reader_tasks[i]); 2788 kfree(reader_tasks); 2789 reader_tasks = NULL; 2790 } 2791 kfree(rcu_torture_reader_mbchk); 2792 rcu_torture_reader_mbchk = NULL; 2793 2794 if (fakewriter_tasks) { 2795 for (i = 0; i < nfakewriters; i++) 2796 torture_stop_kthread(rcu_torture_fakewriter, 2797 fakewriter_tasks[i]); 2798 kfree(fakewriter_tasks); 2799 fakewriter_tasks = NULL; 2800 } 2801 2802 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2803 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2804 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 2805 cur_ops->name, (long)gp_seq, flags, 2806 rcutorture_seq_diff(gp_seq, start_gp_seq)); 2807 torture_stop_kthread(rcu_torture_stats, stats_task); 2808 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2809 if (rcu_torture_can_boost()) 2810 cpuhp_remove_state(rcutor_hp); 2811 2812 /* 2813 * Wait for all RCU callbacks to fire, then do torture-type-specific 2814 * cleanup operations. 2815 */ 2816 if (cur_ops->cb_barrier != NULL) 2817 cur_ops->cb_barrier(); 2818 if (cur_ops->cleanup != NULL) 2819 cur_ops->cleanup(); 2820 2821 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2822 2823 if (err_segs_recorded) { 2824 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2825 if (rt_read_nsegs == 0) 2826 pr_alert("\t: No segments recorded!!!\n"); 2827 firsttime = 1; 2828 for (i = 0; i < rt_read_nsegs; i++) { 2829 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2830 if (err_segs[i].rt_delay_jiffies != 0) { 2831 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2832 err_segs[i].rt_delay_jiffies); 2833 firsttime = 0; 2834 } 2835 if (err_segs[i].rt_delay_ms != 0) { 2836 pr_cont("%s%ldms", firsttime ? "" : "+", 2837 err_segs[i].rt_delay_ms); 2838 firsttime = 0; 2839 } 2840 if (err_segs[i].rt_delay_us != 0) { 2841 pr_cont("%s%ldus", firsttime ? "" : "+", 2842 err_segs[i].rt_delay_us); 2843 firsttime = 0; 2844 } 2845 pr_cont("%s\n", 2846 err_segs[i].rt_preempted ?
"preempted" : ""); 2847 2848 } 2849 } 2850 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2851 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2852 else if (torture_onoff_failures()) 2853 rcu_torture_print_module_parms(cur_ops, 2854 "End of test: RCU_HOTPLUG"); 2855 else 2856 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2857 torture_cleanup_end(); 2858 } 2859 2860 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2861 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2862 { 2863 } 2864 2865 static void rcu_torture_err_cb(struct rcu_head *rhp) 2866 { 2867 /* 2868 * This -might- happen due to race conditions, but is unlikely. 2869 * The scenario that leads to this happening is that the 2870 * first of the pair of duplicate callbacks is queued, 2871 * someone else starts a grace period that includes that 2872 * callback, then the second of the pair must wait for the 2873 * next grace period. Unlikely, but can happen. If it 2874 * does happen, the debug-objects subsystem won't have splatted. 2875 */ 2876 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2877 } 2878 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2879 2880 /* 2881 * Verify that double-free causes debug-objects to complain, but only 2882 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2883 * cannot be carried out. 2884 */ 2885 static void rcu_test_debug_objects(void) 2886 { 2887 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2888 struct rcu_head rh1; 2889 struct rcu_head rh2; 2890 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2891 2892 init_rcu_head_on_stack(&rh1); 2893 init_rcu_head_on_stack(&rh2); 2894 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2895 2896 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2897 preempt_disable(); /* Prevent preemption from interrupting test. */ 2898 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2899 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2900 local_irq_disable(); /* Make it harder to start a new grace period. */ 2901 call_rcu(&rh2, rcu_torture_leak_cb); 2902 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2903 if (rhp) { 2904 call_rcu(rhp, rcu_torture_leak_cb); 2905 call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 2906 } 2907 local_irq_enable(); 2908 rcu_read_unlock(); 2909 preempt_enable(); 2910 2911 /* Wait for them all to get done so we can safely return. 
*/ 2912 rcu_barrier(); 2913 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2914 destroy_rcu_head_on_stack(&rh1); 2915 destroy_rcu_head_on_stack(&rh2); 2916 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2917 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2918 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2919 } 2920 2921 static void rcutorture_sync(void) 2922 { 2923 static unsigned long n; 2924 2925 if (cur_ops->sync && !(++n & 0xfff)) 2926 cur_ops->sync(); 2927 } 2928 2929 static int __init 2930 rcu_torture_init(void) 2931 { 2932 long i; 2933 int cpu; 2934 int firsterr = 0; 2935 int flags = 0; 2936 unsigned long gp_seq = 0; 2937 static struct rcu_torture_ops *torture_ops[] = { 2938 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2939 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, 2940 &tasks_tracing_ops, &trivial_ops, 2941 }; 2942 2943 if (!torture_init_begin(torture_type, verbose)) 2944 return -EBUSY; 2945 2946 /* Process args and tell the world that the torturer is on the job. */ 2947 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2948 cur_ops = torture_ops[i]; 2949 if (strcmp(torture_type, cur_ops->name) == 0) 2950 break; 2951 } 2952 if (i == ARRAY_SIZE(torture_ops)) { 2953 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2954 torture_type); 2955 pr_alert("rcu-torture types:"); 2956 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2957 pr_cont(" %s", torture_ops[i]->name); 2958 pr_cont("\n"); 2959 firsterr = -EINVAL; 2960 cur_ops = NULL; 2961 goto unwind; 2962 } 2963 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2964 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2965 fqs_duration = 0; 2966 } 2967 if (cur_ops->init) 2968 cur_ops->init(); 2969 2970 if (nreaders >= 0) { 2971 nrealreaders = nreaders; 2972 } else { 2973 nrealreaders = num_online_cpus() - 2 - nreaders; 2974 if (nrealreaders <= 0) 2975 nrealreaders = 1; 2976 } 2977 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2978 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2979 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2980 start_gp_seq = gp_seq; 2981 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 2982 cur_ops->name, (long)gp_seq, flags); 2983 2984 /* Set up the freelist. */ 2985 2986 INIT_LIST_HEAD(&rcu_torture_freelist); 2987 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2988 rcu_tortures[i].rtort_mbtest = 0; 2989 list_add_tail(&rcu_tortures[i].rtort_free, 2990 &rcu_torture_freelist); 2991 } 2992 2993 /* Initialize the statistics so that each run gets its own numbers. 
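This includes zeroing the per-CPU pipeline and batch counters below so that counts left over from a previous run cannot produce spurious too-short grace-period reports.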
*/ 2994 2995 rcu_torture_current = NULL; 2996 rcu_torture_current_version = 0; 2997 atomic_set(&n_rcu_torture_alloc, 0); 2998 atomic_set(&n_rcu_torture_alloc_fail, 0); 2999 atomic_set(&n_rcu_torture_free, 0); 3000 atomic_set(&n_rcu_torture_mberror, 0); 3001 atomic_set(&n_rcu_torture_mbchk_fail, 0); 3002 atomic_set(&n_rcu_torture_mbchk_tries, 0); 3003 atomic_set(&n_rcu_torture_error, 0); 3004 n_rcu_torture_barrier_error = 0; 3005 n_rcu_torture_boost_ktrerror = 0; 3006 n_rcu_torture_boost_rterror = 0; 3007 n_rcu_torture_boost_failure = 0; 3008 n_rcu_torture_boosts = 0; 3009 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 3010 atomic_set(&rcu_torture_wcount[i], 0); 3011 for_each_possible_cpu(cpu) { 3012 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 3013 per_cpu(rcu_torture_count, cpu)[i] = 0; 3014 per_cpu(rcu_torture_batch, cpu)[i] = 0; 3015 } 3016 } 3017 err_segs_recorded = 0; 3018 rt_read_nsegs = 0; 3019 3020 /* Start up the kthreads. */ 3021 3022 rcu_torture_write_types(); 3023 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 3024 writer_task); 3025 if (firsterr) 3026 goto unwind; 3027 if (nfakewriters > 0) { 3028 fakewriter_tasks = kcalloc(nfakewriters, 3029 sizeof(fakewriter_tasks[0]), 3030 GFP_KERNEL); 3031 if (fakewriter_tasks == NULL) { 3032 VERBOSE_TOROUT_ERRSTRING("out of memory"); 3033 firsterr = -ENOMEM; 3034 goto unwind; 3035 } 3036 } 3037 for (i = 0; i < nfakewriters; i++) { 3038 firsterr = torture_create_kthread(rcu_torture_fakewriter, 3039 NULL, fakewriter_tasks[i]); 3040 if (firsterr) 3041 goto unwind; 3042 } 3043 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 3044 GFP_KERNEL); 3045 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 3046 GFP_KERNEL); 3047 if (!reader_tasks || !rcu_torture_reader_mbchk) { 3048 VERBOSE_TOROUT_ERRSTRING("out of memory"); 3049 firsterr = -ENOMEM; 3050 goto unwind; 3051 } 3052 for (i = 0; i < nrealreaders; i++) { 3053 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 3054 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 3055 reader_tasks[i]); 3056 if (firsterr) 3057 goto unwind; 3058 } 3059 nrealnocbers = nocbs_nthreads; 3060 if (WARN_ON(nrealnocbers < 0)) 3061 nrealnocbers = 1; 3062 if (WARN_ON(nocbs_toggle < 0)) 3063 nocbs_toggle = HZ; 3064 if (nrealnocbers > 0) { 3065 nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL); 3066 if (nocb_tasks == NULL) { 3067 VERBOSE_TOROUT_ERRSTRING("out of memory"); 3068 firsterr = -ENOMEM; 3069 goto unwind; 3070 } 3071 } else { 3072 nocb_tasks = NULL; 3073 } 3074 for (i = 0; i < nrealnocbers; i++) { 3075 firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]); 3076 if (firsterr) 3077 goto unwind; 3078 } 3079 if (stat_interval > 0) { 3080 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 3081 stats_task); 3082 if (firsterr) 3083 goto unwind; 3084 } 3085 if (test_no_idle_hz && shuffle_interval > 0) { 3086 firsterr = torture_shuffle_init(shuffle_interval * HZ); 3087 if (firsterr) 3088 goto unwind; 3089 } 3090 if (stutter < 0) 3091 stutter = 0; 3092 if (stutter) { 3093 int t; 3094 3095 t = cur_ops->stall_dur ? 
cur_ops->stall_dur() : stutter * HZ; 3096 firsterr = torture_stutter_init(stutter * HZ, t); 3097 if (firsterr) 3098 goto unwind; 3099 } 3100 if (fqs_duration < 0) 3101 fqs_duration = 0; 3102 if (fqs_duration) { 3103 /* Create the fqs thread */ 3104 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 3105 fqs_task); 3106 if (firsterr) 3107 goto unwind; 3108 } 3109 if (test_boost_interval < 1) 3110 test_boost_interval = 1; 3111 if (test_boost_duration < 2) 3112 test_boost_duration = 2; 3113 if (rcu_torture_can_boost()) { 3114 3115 boost_starttime = jiffies + test_boost_interval * HZ; 3116 3117 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 3118 rcutorture_booster_init, 3119 rcutorture_booster_cleanup); 3120 if (firsterr < 0) 3121 goto unwind; 3122 rcutor_hp = firsterr; 3123 } 3124 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3125 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3126 if (firsterr) 3127 goto unwind; 3128 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3129 rcutorture_sync); 3130 if (firsterr) 3131 goto unwind; 3132 firsterr = rcu_torture_stall_init(); 3133 if (firsterr) 3134 goto unwind; 3135 firsterr = rcu_torture_fwd_prog_init(); 3136 if (firsterr) 3137 goto unwind; 3138 firsterr = rcu_torture_barrier_init(); 3139 if (firsterr) 3140 goto unwind; 3141 firsterr = rcu_torture_read_exit_init(); 3142 if (firsterr) 3143 goto unwind; 3144 if (object_debug) 3145 rcu_test_debug_objects(); 3146 torture_init_end(); 3147 return 0; 3148 3149 unwind: 3150 torture_init_end(); 3151 rcu_torture_cleanup(); 3152 if (shutdown_secs) { 3153 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3154 kernel_power_off(); 3155 } 3156 return firsterr; 3157 } 3158 3159 module_init(rcu_torture_init); 3160 module_exit(rcu_torture_cleanup); 3161
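/*
 * Illustrative example only (the parameter values below are arbitrary and
 * not a recommendation): loading the module against the default RCU flavor
 * with read-then-exit testing enabled might look like this:
 *
 *	modprobe rcutorture torture_type=rcu read_exit_burst=16 \
 *		stat_interval=15 verbose=1
 */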