/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(bool, verbose, true,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
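/*
 * rcu_torture_current points to the element currently exposed to
 * readers.  The per-CPU rcu_torture_count[] and rcu_torture_batch[]
 * arrays below are histograms indexed by how far through the
 * grace-period "pipeline" an element had advanced when a reader saw
 * it; any count in a rcu_torture_count[] bucket beyond index 1 means
 * that a reader observed an element outlive a full grace period.
 */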
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

static int torture_runnable = IS_ENABLED(MODULE);
module_param(torture_runnable, int, 0444);
MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*started)(void);
	unsigned long (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->completed();
		ts = rcu_trace_clock_local();
		mdelay(longdelay_ms);
		completed = cur_ops->completed();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.started	= rcu_batches_started,
	.completed	= rcu_batches_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.started	= rcu_batches_started_bh,
	.completed	= rcu_batches_completed_bh,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task())
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.started	= NULL,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.started	= NULL,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.started	= rcu_batches_started_sched,
	.completed	= rcu_batches_completed_sched,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.get_state	= get_state_synchronize_sched,
	.cond_sync	= cond_synchronize_sched,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.started	= rcu_no_completed,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */
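/*
 * Per-callback state for the boost test.  The boost kthread stores 1
 * to ->inflight (with release semantics) just before posting the
 * callback, and rcu_torture_boost_cb() clears it the same way; the
 * kthread's smp_load_acquire() of ->inflight pairs with that store so
 * that the callback's accesses are ordered before the rcu_head is
 * reused.
 */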
struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(sizeof(*rhp) *
			      cbflood_n_burst * cbflood_n_per_burst);
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite) {
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " Disabled dynamic grace-period expediting.\n",
			 torture_type);
	}

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync)
		synctype[nsynctypes++] = RTWS_COND_GET;
	else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync))
		pr_alert("rcu_torture_writer: gp_cond without primitives.\n");
	if (gp_exp1 && cur_ops->exp_sync)
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
	else if (gp_exp && !cur_ops->exp_sync)
		pr_alert("rcu_torture_writer: gp_exp without primitives.\n");
	if (gp_normal1 && cur_ops->deferred_free)
		synctype[nsynctypes++] = RTWS_DEF_FREE;
	else if (gp_normal && !cur_ops->deferred_free)
		pr_alert("rcu_torture_writer: gp_normal without primitives.\n");
	if (gp_sync1 && cur_ops->sync)
		synctype[nsynctypes++] = RTWS_SYNC;
	else if (gp_sync && !cur_ops->sync)
		pr_alert("rcu_torture_writer: gp_sync without primitives.\n");
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		stutter_wait("rcu_torture_writer");
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (torture_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	int idx;
	unsigned long started;
	unsigned long completed;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	if (cur_ops->started)
		started = cur_ops->started();
	else
		started = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed - started;
	if (cur_ops->started)
		completed++;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long started;
	unsigned long completed;
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		if (cur_ops->started)
			started = cur_ops->started();
		else
			started = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(srcu_ctlp) ||
					  torturing_tasks());
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, started, completed);
			rcu_ftrace_dump(DUMP_ALL);
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed - started;
		if (cur_ops->started)
			completed++;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		n_rcu_torture_timers);
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);
	pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gpnum = 0;
		unsigned long __maybe_unused completed = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gpnum, &completed);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gpnum, &completed);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state,
			 gpnum, completed, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int i;

	rcutorture_record_test_transition();
	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do flavor-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&sched_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kzalloc(nfakewriters *
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, NULL,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	if (cbflood_n_burst > 0) {
		/* Create the cbflood threads */
		ncbflooders = (num_online_cpus() + 3) / 4;
		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
				       GFP_KERNEL);
		if (!cbflood_task) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
		for (i = 0; i < ncbflooders; i++) {
			firsterr = torture_create_kthread(rcu_torture_cbflood,
							  NULL,
							  cbflood_task[i]);
			if (firsterr)
				goto unwind;
		}
	}
	rcutorture_record_test_transition();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
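
/*
 * Example usage (illustrative values only; any of the module parameters
 * declared above may be supplied the same way):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * See Documentation/RCU/torture.txt for the full parameter list.
 */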