// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
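
/*
 * Illustrative example, not used by the code: a reader segment nested
 * inside rcu_read_lock_bh() and an SRCU reader whose srcu_read_lock()
 * returned index 1 would be encoded as
 * (1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_RCU.
 */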

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track if the test failed already in this test interval? */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1; in
		 * this case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Most of the time lots of bits, half the time only one bit. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}
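
/*
 * Illustrative note (not referenced by the code): if the new mask keeps
 * RCUTORTURE_RDR_IRQ but would drop a bh-style protection that the old
 * mask held, rcutorture_extend_mask() above adds both RCUTORTURE_RDR_BH
 * and RCUTORTURE_RDR_RBH back in, because bh cannot be re-enabled while
 * interrupts remain disabled.
 */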

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;
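
/*
 * Sizing note, derived from the definitions above and not used by the
 * code: with MAX_FWD_CB_JIFFIES at 8 * HZ and FWD_CBS_HIST_DIV at 10,
 * N_LAUNDERS_HIST works out to 2 * 8 * 10 = 160 buckets, each covering
 * a tenth of a second, or twice the maximum callback-flood duration.
 */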

static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}

/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(void)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rcu_fwd_lock, flags);
		rfcp = rcu_fwd_cb_head;
		if (!rfcp)
			break;
		rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rcu_fwd_cb_head)
			rcu_fwd_cb_tail = &rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
	}
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
	return freed;
}

/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rcu_fwd_startat, jiffies);
	stopat = rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
}

/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
	unsigned long cver;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(void)
{
        unsigned long cver;
        unsigned long gps;
        int i;
        long n_launders;
        long n_launders_cb_snap;
        long n_launders_sa;
        long n_max_cbs;
        long n_max_gps;
        struct rcu_fwd_cb *rfcp;
        struct rcu_fwd_cb *rfcpn;
        unsigned long stopat;
        unsigned long stoppedat;

        if (READ_ONCE(rcu_fwd_emergency_stop))
                return; /* Get out of the way quickly, no GP wait! */

        /* Loop continuously posting RCU callbacks. */
        WRITE_ONCE(rcu_fwd_cb_nodelay, true);
        cur_ops->sync(); /* Later readers see above write. */
        WRITE_ONCE(rcu_fwd_startat, jiffies);
        stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
        n_launders = 0;
        n_launders_cb = 0;
        n_launders_sa = 0;
        n_max_cbs = 0;
        n_max_gps = 0;
        for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
                n_launders_hist[i].n_launders = 0;
        cver = READ_ONCE(rcu_torture_current_version);
        gps = cur_ops->get_gp_seq();
        rcu_launder_gp_seq_start = gps;
        while (time_before(jiffies, stopat) &&
               !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
                rfcp = READ_ONCE(rcu_fwd_cb_head);
                rfcpn = NULL;
                if (rfcp)
                        rfcpn = READ_ONCE(rfcp->rfc_next);
                if (rfcpn) {
                        if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
                            ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
                                break;
                        rcu_fwd_cb_head = rfcpn;
                        n_launders++;
                        n_launders_sa++;
                } else {
                        rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
                        if (WARN_ON_ONCE(!rfcp)) {
                                schedule_timeout_interruptible(1);
                                continue;
                        }
                        n_max_cbs++;
                        n_launders_sa = 0;
                        rfcp->rfc_gps = 0;
                }
                cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
                cond_resched();
        }
        stoppedat = jiffies;
        n_launders_cb_snap = READ_ONCE(n_launders_cb);
        cver = READ_ONCE(rcu_torture_current_version) - cver;
        gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
        cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
        (void)rcu_torture_fwd_prog_cbfree();

        WRITE_ONCE(rcu_fwd_cb_nodelay, false);
        if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
                WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
                pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
                         __func__,
                         stoppedat - rcu_fwd_startat, jiffies - stoppedat,
                         n_launders + n_max_cbs - n_launders_cb_snap,
                         n_launders, n_launders_sa,
                         n_max_gps, n_max_cbs, cver, gps);
                rcu_torture_fwd_cb_hist();
        }
}

/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
                                 unsigned long notused, void *nfreed)
{
        WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
             __func__);
        rcu_torture_fwd_cb_hist();
        rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
        WRITE_ONCE(rcu_fwd_emergency_stop, true);
        smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
        pr_info("%s: Freed %lu RCU callbacks.\n",
                __func__, rcu_torture_fwd_prog_cbfree());
        rcu_barrier();
        pr_info("%s: Freed %lu RCU callbacks.\n",
                __func__, rcu_torture_fwd_prog_cbfree());
        rcu_barrier();
        pr_info("%s: Freed %lu RCU callbacks.\n",
                __func__, rcu_torture_fwd_prog_cbfree());
        smp_mb(); /* Frees before return to avoid redoing OOM. */
        (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
        pr_info("%s returning after OOM processing.\n", __func__);
        return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
        .notifier_call = rcutorture_oom_notify
};
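
/*
 * Note: the forward-progress kthread below registers the OOM notifier only
 * for the duration of each pair of tests, so an OOM outside of those tests
 * does not dump forward-progress diagnostics or free the callback flood.
 */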
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
        int tested = 0;
        int tested_tries = 0;

        VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
        rcu_bind_current_to_nocb();
        if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
                set_user_nice(current, MAX_NICE);
        do {
                schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
                WRITE_ONCE(rcu_fwd_emergency_stop, false);
                register_oom_notifier(&rcutorture_oom_nb);
                rcu_torture_fwd_prog_nr(&tested, &tested_tries);
                rcu_torture_fwd_prog_cr();
                unregister_oom_notifier(&rcutorture_oom_nb);

                /* Avoid slow periods, better to test when busy. */
                stutter_wait("rcu_torture_fwd_prog");
        } while (!torture_must_stop());
        /* Short runs might not contain a valid forward-progress attempt. */
        WARN_ON(!tested && tested_tries >= 5);
        pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
        torture_kthread_stopping("rcu_torture_fwd_prog");
        return 0;
}

/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
        if (!fwd_progress)
                return 0; /* Not requested, so don't do it. */
        if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
            cur_ops == &rcu_busted_ops) {
                VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
                return 0;
        }
        if (stall_cpu > 0) {
                VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
                if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
                        return -EINVAL; /* In module, can fail back to user. */
                WARN_ON(1); /* Make sure rcutorture notices conflict. */
                return 0;
        }
        if (fwd_progress_holdoff <= 0)
                fwd_progress_holdoff = 1;
        if (fwd_progress_div <= 0)
                fwd_progress_div = 4;
        return torture_create_kthread(rcu_torture_fwd_prog,
                                      NULL, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
        atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
        long myid = (long)arg;
        bool lastphase = 0;
        bool newphase;
        struct rcu_head rcu;

        init_rcu_head_on_stack(&rcu);
        VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
        set_user_nice(current, MAX_NICE);
        do {
                wait_event(barrier_cbs_wq[myid],
                           (newphase =
                            smp_load_acquire(&barrier_phase)) != lastphase ||
                           torture_must_stop());
                lastphase = newphase;
                if (torture_must_stop())
                        break;
                /*
                 * The above smp_load_acquire() ensures barrier_phase load
                 * is ordered before the following ->call().
                 */
                local_irq_disable(); /* Just to test no-irq call_rcu(). */
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
                local_irq_enable();
                if (atomic_dec_and_test(&barrier_cbs_count))
                        wake_up(&barrier_wq);
        } while (!torture_must_stop());
        if (cur_ops->cb_barrier != NULL)
                cur_ops->cb_barrier();
        destroy_rcu_head_on_stack(&rcu);
        torture_kthread_stopping("rcu_torture_barrier_cbs");
        return 0;
}
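
/*
 * Note on the handshake below: rcu_torture_barrier() flips barrier_phase
 * with smp_store_release(), each rcu_torture_barrier_cbs() kthread observes
 * the flip via smp_load_acquire(), posts one callback, and decrements
 * barrier_cbs_count.  Once the count reaches zero, the driver invokes
 * cur_ops->cb_barrier() and checks that every posted callback was invoked.
 */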
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
        int i;

        VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
        do {
                atomic_set(&barrier_cbs_invoked, 0);
                atomic_set(&barrier_cbs_count, n_barrier_cbs);
                /* Ensure barrier_phase ordered after prior assignments. */
                smp_store_release(&barrier_phase, !barrier_phase);
                for (i = 0; i < n_barrier_cbs; i++)
                        wake_up(&barrier_cbs_wq[i]);
                wait_event(barrier_wq,
                           atomic_read(&barrier_cbs_count) == 0 ||
                           torture_must_stop());
                if (torture_must_stop())
                        break;
                n_barrier_attempts++;
                cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
                if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
                        n_rcu_torture_barrier_error++;
                        pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
                               atomic_read(&barrier_cbs_invoked),
                               n_barrier_cbs);
                        WARN_ON_ONCE(1);
                } else {
                        n_barrier_successes++;
                }
                schedule_timeout_interruptible(HZ / 10);
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_torture_barrier");
        return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
        int i;
        int ret;

        if (n_barrier_cbs <= 0)
                return 0;
        if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
                pr_alert("%s" TORTURE_FLAG
                         " Call or barrier ops missing for %s,\n",
                         torture_type, cur_ops->name);
                pr_alert("%s" TORTURE_FLAG
                         " RCU barrier testing omitted from run.\n",
                         torture_type);
                return 0;
        }
        atomic_set(&barrier_cbs_count, 0);
        atomic_set(&barrier_cbs_invoked, 0);
        barrier_cbs_tasks =
                kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
                        GFP_KERNEL);
        barrier_cbs_wq =
                kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
        if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
                return -ENOMEM;
        for (i = 0; i < n_barrier_cbs; i++) {
                init_waitqueue_head(&barrier_cbs_wq[i]);
                ret = torture_create_kthread(rcu_torture_barrier_cbs,
                                             (void *)(long)i,
                                             barrier_cbs_tasks[i]);
                if (ret)
                        return ret;
        }
        return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
        int i;

        torture_stop_kthread(rcu_torture_barrier, barrier_task);
        if (barrier_cbs_tasks != NULL) {
                for (i = 0; i < n_barrier_cbs; i++)
                        torture_stop_kthread(rcu_torture_barrier_cbs,
                                             barrier_cbs_tasks[i]);
                kfree(barrier_cbs_tasks);
                barrier_cbs_tasks = NULL;
        }
        if (barrier_cbs_wq != NULL) {
                kfree(barrier_cbs_wq);
                barrier_cbs_wq = NULL;
        }
}
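
/*
 * Note: test_boost=1 (the default) enables boost testing only when the
 * flavor under test advertises ->can_boost, while test_boost=2 forces it.
 * Even then, boosting is tested only if the RCU grace-period kthreads run
 * at a real-time priority of at least 2 (rcutree.kthread_prio).
 */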
static bool rcu_torture_can_boost(void)
{
        static int boost_warn_once;
        int prio;

        if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
                return false;

        prio = rcu_get_gp_kthreads_prio();
        if (!prio)
                return false;

        if (prio < 2) {
                if (boost_warn_once == 1)
                        return false;

                pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
                boost_warn_once = 1;
                return false;
        }

        return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
        int firsttime;
        int flags = 0;
        unsigned long gp_seq = 0;
        int i;

        if (torture_cleanup_begin()) {
                if (cur_ops->cb_barrier != NULL)
                        cur_ops->cb_barrier();
                return;
        }

        rcu_torture_barrier_cleanup();
        torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
        torture_stop_kthread(rcu_torture_stall, stall_task);
        torture_stop_kthread(rcu_torture_writer, writer_task);

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_torture_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
        }
        rcu_torture_current = NULL;

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
                        torture_stop_kthread(rcu_torture_fakewriter,
                                             fakewriter_tasks[i]);
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }

        rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
        srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
        pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
                 cur_ops->name, gp_seq, flags);
        torture_stop_kthread(rcu_torture_stats, stats_task);
        torture_stop_kthread(rcu_torture_fqs, fqs_task);
        if (rcu_torture_can_boost())
                cpuhp_remove_state(rcutor_hp);

        /*
         * Wait for all RCU callbacks to fire, then do torture-type-specific
         * cleanup operations.
         */
        if (cur_ops->cb_barrier != NULL)
                cur_ops->cb_barrier();
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

        if (err_segs_recorded) {
                pr_alert("Failure/close-call rcutorture reader segments:\n");
                if (rt_read_nsegs == 0)
                        pr_alert("\t: No segments recorded!!!\n");
                firsttime = 1;
                for (i = 0; i < rt_read_nsegs; i++) {
                        pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
                        if (err_segs[i].rt_delay_jiffies != 0) {
                                pr_cont("%s%ldjiffies", firsttime ? "" : "+",
                                        err_segs[i].rt_delay_jiffies);
                                firsttime = 0;
                        }
                        if (err_segs[i].rt_delay_ms != 0) {
                                pr_cont("%s%ldms", firsttime ? "" : "+",
                                        err_segs[i].rt_delay_ms);
                                firsttime = 0;
                        }
                        if (err_segs[i].rt_delay_us != 0) {
                                pr_cont("%s%ldus", firsttime ? "" : "+",
                                        err_segs[i].rt_delay_us);
                                firsttime = 0;
                        }
                        pr_cont("%s\n",
                                err_segs[i].rt_preempted ? "preempted" : "");
                }
        }
        if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
                rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
        else if (torture_onoff_failures())
                rcu_torture_print_module_parms(cur_ops,
                                               "End of test: RCU_HOTPLUG");
        else
                rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
        torture_cleanup_end();
}
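
/*
 * The two callbacks below support rcu_test_debug_objects(), which runs when
 * the object_debug module parameter is set and deliberately posts the same
 * rcu_head twice so that debug-objects (when configured) can flag the
 * duplicate call_rcu().
 */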
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
        /*
         * This -might- happen due to race conditions, but is unlikely.
         * The scenario that leads to this happening is that the
         * first of the pair of duplicate callbacks is queued,
         * someone else starts a grace period that includes that
         * callback, then the second of the pair must wait for the
         * next grace period.  Unlikely, but can happen.  If it
         * does happen, the debug-objects subsystem won't have splatted.
         */
        pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
        struct rcu_head rh1;
        struct rcu_head rh2;

        init_rcu_head_on_stack(&rh1);
        init_rcu_head_on_stack(&rh2);
        pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

        /* Try to queue the rh2 pair of callbacks for the same grace period. */
        preempt_disable(); /* Prevent preemption from interrupting test. */
        rcu_read_lock(); /* Make it impossible to finish a grace period. */
        call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
        local_irq_disable(); /* Make it harder to start a new grace period. */
        call_rcu(&rh2, rcu_torture_leak_cb);
        call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
        local_irq_enable();
        rcu_read_unlock();
        preempt_enable();

        /* Wait for them all to get done so we can safely return. */
        rcu_barrier();
        pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
        destroy_rcu_head_on_stack(&rh1);
        destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
        pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static void rcutorture_sync(void)
{
        static unsigned long n;

        if (cur_ops->sync && !(++n & 0xfff))
                cur_ops->sync();
}
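
/*
 * Note: rcutorture_sync() is handed to torture_onoff_init() below for use
 * during CPU-hotplug testing; the !(++n & 0xfff) check means only about one
 * call in 4096 actually waits for a full grace period, keeping the hotplug
 * path cheap.
 */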
static int __init
rcu_torture_init(void)
{
        long i;
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] = {
                &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
                &busted_srcud_ops, &tasks_ops,
        };

        if (!torture_init_begin(torture_type, verbose))
                return -EBUSY;

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
                         torture_type);
                pr_alert("rcu-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_cont(" %s", torture_ops[i]->name);
                pr_cont("\n");
                WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
                firsterr = -EINVAL;
                goto unwind;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
                pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
                fqs_duration = 0;
        }
        if (cur_ops->init)
                cur_ops->init();

        if (nreaders >= 0) {
                nrealreaders = nreaders;
        } else {
                /*
                 * Negative nreaders scales with CPU count; the default of
                 * -1 yields num_online_cpus() - 1 reader kthreads.
                 */
                nrealreaders = num_online_cpus() - 2 - nreaders;
                if (nrealreaders <= 0)
                        nrealreaders = 1;
        }
        rcu_torture_print_module_parms(cur_ops, "Start of test");

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        n_rcu_torture_barrier_error = 0;
        n_rcu_torture_boost_ktrerror = 0;
        n_rcu_torture_boost_rterror = 0;
        n_rcu_torture_boost_failure = 0;
        n_rcu_torture_boosts = 0;
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }
        err_segs_recorded = 0;
        rt_read_nsegs = 0;
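
        /*
         * Note: each startup step below records its first error in firsterr
         * and jumps to the unwind label, which ends the init phase and runs
         * rcu_torture_cleanup() to stop whatever was already started.
         */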
        /* Start up the kthreads. */

        firsterr = torture_create_kthread(rcu_torture_writer, NULL,
                                          writer_task);
        if (firsterr)
                goto unwind;
        if (nfakewriters > 0) {
                fakewriter_tasks = kcalloc(nfakewriters,
                                           sizeof(fakewriter_tasks[0]),
                                           GFP_KERNEL);
                if (fakewriter_tasks == NULL) {
                        VERBOSE_TOROUT_ERRSTRING("out of memory");
                        firsterr = -ENOMEM;
                        goto unwind;
                }
        }
        for (i = 0; i < nfakewriters; i++) {
                firsterr = torture_create_kthread(rcu_torture_fakewriter,
                                                  NULL, fakewriter_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_TOROUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(rcu_torture_stats, NULL,
                                                  stats_task);
                if (firsterr)
                        goto unwind;
        }
        if (test_no_idle_hz && shuffle_interval > 0) {
                firsterr = torture_shuffle_init(shuffle_interval * HZ);
                if (firsterr)
                        goto unwind;
        }
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
                firsterr = torture_stutter_init(stutter * HZ);
                if (firsterr)
                        goto unwind;
        }
        if (fqs_duration < 0)
                fqs_duration = 0;
        if (fqs_duration) {
                /* Create the fqs thread */
                firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
                                                  fqs_task);
                if (firsterr)
                        goto unwind;
        }
        if (test_boost_interval < 1)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
        if (rcu_torture_can_boost()) {

                boost_starttime = jiffies + test_boost_interval * HZ;

                firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
                                             rcutorture_booster_init,
                                             rcutorture_booster_cleanup);
                if (firsterr < 0)
                        goto unwind;
                rcutor_hp = firsterr;
        }
        firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
        if (firsterr)
                goto unwind;
        firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
                                      rcutorture_sync);
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_stall_init();
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_fwd_prog_init();
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_barrier_init();
        if (firsterr)
                goto unwind;
        if (object_debug)
                rcu_test_debug_objects();
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);