/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
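/*
 * Each rcu_torture element cycles through a "pipeline": ->rtort_pipe_count
 * counts how many update/grace-period stages have passed since the element
 * was replaced as rcu_torture_current, and once it reaches
 * RCU_TORTURE_PIPE_LEN the element is returned to the freelist.  A reader
 * observing a count greater than one has caught a too-short grace period.
 * ->rtort_mbtest is set while the element is live and cleared before it is
 * recycled, so readers use it to detect memory-ordering bugs.  ->rtort_free
 * links the element onto rcu_torture_freelist or rcu_torture_removed.
 */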
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
374 */ 375 static bool 376 rcu_torture_pipe_update_one(struct rcu_torture *rp) 377 { 378 int i; 379 380 i = rp->rtort_pipe_count; 381 if (i > RCU_TORTURE_PIPE_LEN) 382 i = RCU_TORTURE_PIPE_LEN; 383 atomic_inc(&rcu_torture_wcount[i]); 384 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 385 rp->rtort_mbtest = 0; 386 return true; 387 } 388 return false; 389 } 390 391 /* 392 * Update all callbacks in the pipe. Suitable for synchronous grace-period 393 * primitives. 394 */ 395 static void 396 rcu_torture_pipe_update(struct rcu_torture *old_rp) 397 { 398 struct rcu_torture *rp; 399 struct rcu_torture *rp1; 400 401 if (old_rp) 402 list_add(&old_rp->rtort_free, &rcu_torture_removed); 403 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 404 if (rcu_torture_pipe_update_one(rp)) { 405 list_del(&rp->rtort_free); 406 rcu_torture_free(rp); 407 } 408 } 409 } 410 411 static void 412 rcu_torture_cb(struct rcu_head *p) 413 { 414 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 415 416 if (torture_must_stop_irq()) { 417 /* Test is ending, just drop callbacks on the floor. */ 418 /* The next initialization will pick up the pieces. */ 419 return; 420 } 421 if (rcu_torture_pipe_update_one(rp)) 422 rcu_torture_free(rp); 423 else 424 cur_ops->deferred_free(rp); 425 } 426 427 static unsigned long rcu_no_completed(void) 428 { 429 return 0; 430 } 431 432 static void rcu_torture_deferred_free(struct rcu_torture *p) 433 { 434 call_rcu(&p->rtort_rcu, rcu_torture_cb); 435 } 436 437 static void rcu_sync_torture_init(void) 438 { 439 INIT_LIST_HEAD(&rcu_torture_removed); 440 } 441 442 static struct rcu_torture_ops rcu_ops = { 443 .ttype = RCU_FLAVOR, 444 .init = rcu_sync_torture_init, 445 .readlock = rcu_torture_read_lock, 446 .read_delay = rcu_read_delay, 447 .readunlock = rcu_torture_read_unlock, 448 .get_gp_seq = rcu_get_gp_seq, 449 .gp_diff = rcu_seq_diff, 450 .deferred_free = rcu_torture_deferred_free, 451 .sync = synchronize_rcu, 452 .exp_sync = synchronize_rcu_expedited, 453 .get_state = get_state_synchronize_rcu, 454 .cond_sync = cond_synchronize_rcu, 455 .call = call_rcu, 456 .cb_barrier = rcu_barrier, 457 .fqs = rcu_force_quiescent_state, 458 .stats = NULL, 459 .stall_dur = rcu_jiffies_till_stall_check, 460 .irq_capable = 1, 461 .can_boost = rcu_can_boost(), 462 .extendables = RCUTORTURE_MAX_EXTEND, 463 .name = "rcu" 464 }; 465 466 /* 467 * Don't even think about trying any of these in real life!!! 468 * The names includes "busted", and they really means it! 469 * The only purpose of these functions is to provide a buggy RCU 470 * implementation to make sure that rcutorture correctly emits 471 * buggy-RCU error messages. 472 */ 473 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 474 { 475 /* This is a deliberate bug for testing purposes only! */ 476 rcu_torture_cb(&p->rtort_rcu); 477 } 478 479 static void synchronize_rcu_busted(void) 480 { 481 /* This is a deliberate bug for testing purposes only! */ 482 } 483 484 static void 485 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 486 { 487 /* This is a deliberate bug for testing purposes only! */ 488 func(head); 489 } 490 491 static struct rcu_torture_ops rcu_busted_ops = { 492 .ttype = INVALID_RCU_FLAVOR, 493 .init = rcu_sync_torture_init, 494 .readlock = rcu_torture_read_lock, 495 .read_delay = rcu_read_delay, /* just reuse rcu's version. 
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
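/*
 * srcu_ctlp normally points at the statically allocated srcu_ctl used by
 * the "srcu" torture type.  srcu_torture_init() repoints it at the
 * dynamically initialized srcu_ctld, and srcu_torture_cleanup() randomly
 * exercises one of the two cleanup_srcu_struct() paths before restoring
 * srcu_ctl, so that later rcutorture runs start from a known state.
 */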
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;
static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise, the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * this case the boost check would never happen in the above
		 * loop so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
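/*
 * The reader-extension machinery below encodes all reader state in a
 * single int: the RCUTORTURE_RDR_* flag bits live in the low-order bits
 * covered by RCUTORTURE_RDR_MASK, and the index returned by ->readlock()
 * (significant for SRCU) is stored in the bits above RCUTORTURE_RDR_SHIFT.
 */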
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Most of the time lots of bits, half the time only one bit. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
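/*
 * The timer above is not self-rearming: each reader kthread below re-arms
 * its per-thread timer on every pass through its loop (when irqreader is
 * set and the current torture type is irq_capable), so a fraction of the
 * reads runs from the timer handler rather than from task context.
 */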
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
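/*
 * The ->stop field implements a simple handshake: the test sets it to 1
 * to ask the self-propagating callback to stop reposting itself, and the
 * callback sets it to 2 to acknowledge that it has seen the request,
 * which rcu_torture_fwd_prog_nr() later verifies with a WARN_ON().
 */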
/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];

static void rcu_torture_fwd_cb_hist(void)
{
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i] > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	for (j = 0; j <= i; j++)
		pr_cont(" %ds/%d: %ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i]++;
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}
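/*
 * Forward-progress testing comes in two flavors, run back to back by
 * rcu_torture_fwd_prog(): rcu_torture_fwd_prog_nr() below runs a tight
 * reader loop for a fraction of the stall-warning timeout and checks that
 * grace periods still complete, while rcu_torture_fwd_prog_cr() floods
 * call_rcu() with callbacks that are repeatedly re-posted ("laundered")
 * and checks that enough of them pass through multiple grace periods.
 */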

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i]++;
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync();	/* Wait for running CB to complete. */
		cur_ops->cb_barrier();	/* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
}
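
/*
 * Worked example of the test-duration computation above, assuming for
 * illustration that cur_ops->stall_dur() returns 21 * HZ (a 21-second RCU
 * CPU stall timeout) and that fwd_progress_div retains its default of 4:
 * sd is then 21 * HZ + 1, sd4 rounds up to roughly 5.25 * HZ, and dur is
 * chosen uniformly at random from [sd4, sd), so each pass runs the tight
 * reader loop for between about a quarter of and a full stall-warning
 * interval.
 */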

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	n_launders_cb = 0;
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
		n_launders_hist[i] = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		cond_resched();
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree();

	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist();
	}
}

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist();
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree());
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};

/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		register_oom_notifier(&rcutorture_oom_nb);
		rcu_torture_fwd_prog_nr(&tested, &tested_tries);
		rcu_torture_fwd_prog_cr();
		unregister_oom_notifier(&rcutorture_oom_nb);

		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
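
/*
 * The forward-progress machinery above is controlled entirely by the
 * module parameters checked in rcu_torture_fwd_prog_init() below:
 * fwd_progress enables the kthread, fwd_progress_holdoff sets the delay
 * (in seconds) between tests, fwd_progress_div sets the fraction of an
 * RCU CPU stall timeout to run each need_resched() test, and
 * fwd_progress_need_resched hides cond_resched() behind need_resched().
 * A hypothetical invocation exercising this aggressively might look like:
 *
 *	modprobe rcutorture fwd_progress=1 fwd_progress_holdoff=1 \
 *		fwd_progress_div=4
 */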

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
	    cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog,
				      NULL, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
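
/*
 * Sketch of one round of the barrier-test handshake coordinated above,
 * assuming n_barrier_cbs callback-posting kthreads have been spawned:
 *
 *	1. rcu_torture_barrier() zeroes barrier_cbs_invoked, sets
 *	   barrier_cbs_count to n_barrier_cbs, and release-stores the
 *	   flipped barrier_phase.
 *	2. Each rcu_torture_barrier_cbs() kthread acquire-loads the new
 *	   phase, posts one rcu_torture_barrier_cbf() callback, and
 *	   decrements barrier_cbs_count.
 *	3. Once barrier_cbs_count reaches zero, rcu_torture_barrier()
 *	   invokes cur_ops->cb_barrier(); if fewer than n_barrier_cbs
 *	   callbacks were invoked, the barrier primitive lost a callback
 *	   and the test flags an error.
 */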

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
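
/*
 * Barrier testing is skipped when n_barrier_cbs is zero or negative; a
 * hypothetical run enabling it might pass something like
 * "modprobe rcutorture n_barrier_cbs=4", which spawns four callback-posting
 * kthreads plus the coordinating kthread above.
 */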

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
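
/*
 * Note that this duplicate-call_rcu() check only runs when the
 * object_debug module parameter is set (see rcu_torture_init() below),
 * and it only produces a debug-objects splat when the kernel was built
 * with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  A hypothetical invocation might
 * therefore be "modprobe rcutorture object_debug=1" on such a kernel.
 */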

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}
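
/*
 * Worked example of the reader-count computation in rcu_torture_init()
 * above, assuming an eight-CPU system for illustration: if nreaders is
 * left at -1, the negative-value branch yields
 * nrealreaders = num_online_cpus() - 2 - nreaders = 8 - 2 - (-1) = 7,
 * leaving roughly one CPU free for the writer and housekeeping kthreads.
 */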

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);