/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x1	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x2	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x4	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RCU	 0x8	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 4	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
				  RCUTORTURE_RDR_PREEMPT)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
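
/* Module parameters controlling the torture test. */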
torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
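
/*
 * Global state: the static pool of rcu_torture elements, the free list
 * feeding the writer, and the per-CPU "pipeline" counters used to flag
 * grace periods that end too soon.
 */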
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*   and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		mdelay(longdelay_ms);
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500)))
		torture_preempt_schedule(); /* QS only if preemptible. */
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}
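
/*
 * Callback invoked from the flavor-specific call_rcu().  Recycles the
 * element or passes it through another grace period, unless the test
 * is shutting down.
 */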
static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

/* Used as ->get_gp_seq() by flavors that do not expose a GP sequence. */
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.name = "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.ttype = RCU_BH_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_bh_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.get_gp_seq = rcu_bh_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_bh_torture_deferred_free,
	.sync = synchronize_rcu_bh,
	.exp_sync = synchronize_rcu_bh_expedited,
	.call = call_rcu_bh,
	.cb_barrier = rcu_barrier_bh,
	.fqs = rcu_bh_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
	.ext_irq_conflict = RCUTORTURE_RDR_RCU,
	.name = "rcu_bh"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task())
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};
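
/*
 * Set up the dynamically allocated srcu_struct used by the "srcud" and
 * "busted_srcud" flavors, leaving the static srcu_ctl untouched.
 */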
static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.ttype = RCU_SCHED_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = sched_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.get_gp_seq = rcu_sched_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_sched_torture_deferred_free,
	.sync = synchronize_sched,
	.exp_sync = synchronize_sched_expedited,
	.get_state = get_state_synchronize_sched,
	.cond_sync = cond_synchronize_sched,
	.call = call_rcu_sched,
	.cb_barrier = rcu_barrier_sched,
	.fqs = rcu_sched_force_quiescent_state,
	.stats = NULL,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "sched"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks"
};

/* Compute a grace-period-sequence delta, using ->gp_diff if provided. */
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. This is only possible if rcutorture is built in;
	 * otherwise the user must do this manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

/* Return true if the callback outlived the boost-test interval, that is, boosting failed. */
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boosting never happened, then inflight will still be 1,
		 * in which case the boost check never ran in the above loop,
		 * so do one more here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
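
/* No-op callback, used only to generate call_rcu() load during floods. */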
static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu() or analogous, increasing the probability of occurrence
 * of callback-overflow corner cases.
 */
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		rcu_torture_current_version++;
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		stutter_wait("rcu_torture_writer");
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

/* Free the rcu_head posted by the irq-handler call_rcu() test. */
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 1;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Half the time lots of bits, half the time only one bit. */
	if (randmask1 & 0x1)
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & RCUTORTURE_RDR_BH) &&
	    (oldmask & RCUTORTURE_RDR_BH))
		mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static void rcutorture_loop_extend(int *readstate,
				   struct torture_random_state *trsp)
{
	int i;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return;  /* Current RCU flavor not extendable. */
	i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
	while (i--) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp);
	}
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcutorture_loop_extend(&readstate, trsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);
	pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

/* Create and bind the boost kthread for the specified CPU, if not already running. */
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}
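
/*
 * Example (illustrative only, parameter values hypothetical): boost testing
 * as gated by rcu_torture_can_boost() requires the RCU grace-period kthreads
 * to run at real-time priority 2 or higher, so a run exercising it might
 * combine a kernel boot parameter with module parameters roughly as follows:
 *
 *	rcutree.kthread_prio=2    (kernel command line)
 *	modprobe rcutorture test_boost=2 test_boost_interval=7 test_boost_duration=4
 */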

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do flavor-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
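
/*
 * Example (illustrative only): rcu_test_debug_objects() above runs only when
 * the object_debug module parameter is set (see rcu_torture_init() below).
 * On a kernel built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, something like
 *
 *	modprobe rcutorture object_debug=1
 *
 * should cause debug-objects to complain about the intentional duplicate
 * call_rcu() of rh2, rather than rcu_torture_err_cb() ever being invoked.
 */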

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &sched_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, NULL,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
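
	/*
	 * Note (descriptive only): rcu_torture_barrier_init() below is a
	 * no-op unless the n_barrier_cbs module parameter is positive; for
	 * example, a hypothetical "n_barrier_cbs=4" would create four
	 * callback-posting kthreads plus the coordinating rcu_torture_barrier
	 * kthread.
	 */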
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	if (cbflood_n_burst > 0) {
		/* Create the cbflood threads */
		ncbflooders = (num_online_cpus() + 3) / 4;
		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
				       GFP_KERNEL);
		if (!cbflood_task) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
		for (i = 0; i < ncbflooders; i++) {
			firsterr = torture_create_kthread(rcu_torture_cbflood,
							  NULL,
							  cbflood_task[i]);
			if (firsterr)
				goto unwind;
		}
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
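
/*
 * Worked example (illustrative only): with cbflood_n_burst > 0, the init code
 * above sizes the callback-flood pool at one kthread per four online CPUs,
 * rounded up; e.g. six online CPUs give ncbflooders = (6 + 3) / 4 = 2.  The
 * torture_type module parameter must match the ->name of one of the entries
 * in torture_ops[]; a hypothetical "modprobe rcutorture torture_type=srcu"
 * would select srcu_ops, assuming that ops structure's ->name is "srcu".
 */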