// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
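/*
 * A reader's state is a single int: the RCUTORTURE_RDR_* flags above occupy
 * the low-order RCUTORTURE_RDR_SHIFT bits, while the index returned by
 * cur_ops->readlock() (for example, the SRCU array index) is stored in the
 * bits above RCUTORTURE_RDR_SHIFT, so the two can never collide.
 */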

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
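/*
 * Each rcu_torture element moves through a "pipeline": once the writer
 * replaces it as rcu_torture_current, its rtort_pipe_count is incremented
 * after each subsequent grace period until the element is freed.  A reader
 * that observes a pipe count greater than 1 has effectively had a full
 * grace period elapse within its critical section, which indicates a
 * broken RCU implementation.
 */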

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
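/*
 * Note that the "srcu" flavor above uses the statically allocated srcu_ctl,
 * while the "srcud" flavor below points srcu_ctlp at the dynamically
 * initialized srcu_ctld, thereby also exercising init_srcu_struct() and
 * cleanup_srcu_struct().
 */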

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}
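/*
 * The boost kthread below keeps at most one callback in flight at a time,
 * re-posting it only after rcu_torture_boost_cb() has cleared ->inflight.
 * A callback that takes longer than roughly test_boost_duration seconds to
 * be invoked causes the boost test to be declared a failure.
 */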

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
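/*
 * The callback-flood forward-progress test below keeps a singly linked
 * list of rcu_fwd_cb structures.  Invoked callbacks append their element
 * back onto this list (bumping ->rfc_gps), and the flood loop either
 * re-posts an element from the head of the list ("laundering" it) or
 * allocates a new one when the list runs dry, continuing until the time
 * limit or the laundering targets are reached.
 */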
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;

static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
}

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	n_launders_cb = 0;
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
		n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rcu_launder_gp_seq_start = gps;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		cond_resched();
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree();

	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist();
	}
}

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist();
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
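
/*
 * Note that the increment of *nfreed above signals, through the OOM
 * notifier chain, that this notifier freed memory, which may allow the
 * OOM handler to avoid killing a process.  The notifier is registered
 * only for the duration of each forward-progress test pass (see
 * rcu_torture_fwd_prog() below).
 */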
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(&tested, &tested_tries);
		rcu_torture_fwd_prog_cr();
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog,
				      NULL, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
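
/*
 * Barrier testing is disabled by default (n_barrier_cbs defaults to zero).
 * One way to enable it, assuming rcutorture is built as a module, is:
 *
 *	modprobe rcutorture n_barrier_cbs=4
 *
 * which creates four of the above callback-posting kthreads plus the
 * single coordinating kthread defined below.
 */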
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
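
/*
 * Priority-boost testing is controlled by the test_boost module parameter:
 * 0 disables it, 1 (the default) enables it only when the RCU flavor under
 * test supports boosting, and 2 requests it unconditionally.  Even then,
 * the check below insists that the RCU grace-period kthreads run at a
 * priority of at least 2 (for example via the rcutree.kthread_prio boot
 * parameter) before boost testing proceeds.
 */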
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}
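
/*
 * rcu_torture_leak_cb() above is intentionally empty:  it gives the
 * non-erroneous invocations of call_rcu() in rcu_test_debug_objects()
 * a harmless function to invoke, so that only the duplicated callback
 * lands in rcu_torture_err_cb() below.
 */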
static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
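
/*
 * Example invocations, assuming rcutorture is built as a module:
 *
 *	modprobe rcutorture torture_type=srcud nreaders=8
 *	modprobe rcutorture test_boost=2 object_debug=1 n_barrier_cbs=4
 *
 * A built-in rcutorture can instead take the same parameters on the kernel
 * command line, for example rcutorture.torture_type=srcud.
 */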
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;
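
	/*
	 * From here on, any failure to create a kthread or helper funnels
	 * through the unwind label below, which calls rcu_torture_cleanup()
	 * to stop whatever kthreads were already started.
	 */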
	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);