// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");


/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
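/*
 * Illustrative note (added for clarity, not part of the original code):
 * a reader "state" word combines the protection bits above with the SRCU
 * index returned by ->readlock(), shifted into the upper bits.  For
 * example, a reader holding SRCU index 1 with interrupts also disabled
 * would be represented roughly as:
 *
 *	int state = (1 << RCUTORTURE_RDR_SHIFT) |
 *		    RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_RCU;
 *
 * rcutorture_one_extend() below builds and tears down such states, and
 * RCUTORTURE_RDR_MASK recovers the low-order protection bits.
 */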
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
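/*
 * Usage sketch (added for illustration; the values are examples only):
 * when built as a module, the parameters above are set at load time, e.g.:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=15
 *
 * When rcutorture is built in, the same parameters are supplied on the
 * kernel command line with an "rcutorture." prefix, e.g.:
 *
 *	rcutorture.torture_type=rcu rcutorture.fwd_progress=1
 */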
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static struct list_head rcu_torture_removed;
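/*
 * Clarifying note (added): rcu_torture_count[] and rcu_torture_batch[]
 * are per-CPU histograms indexed by how many grace periods an element
 * had passed through when a reader saw it ("Reader Pipe") and by how
 * many grace periods elapsed during a single read ("Reader Batch").
 * In a correct RCU implementation readers should only ever observe
 * pipe counts of 0 or 1; any entry at index 2 or above means a reader
 * saw an element that had already survived a full grace period after
 * removal, which is a bug.
 */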
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the
	 * grace period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!rcu_fwd_cb_nodelay &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
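/*
 * Added note on the delay tuning above: with the default parameters, a
 * long (300 ms) delay is taken with probability roughly
 * 1 / (nrealreaders * 2000 * 300) per read, a short (200 us) delay with
 * probability about 1 / (nrealreaders * 400), and a voluntary preemption
 * point with probability about 1 / (nrealreaders * 500).  Scaling by the
 * number of readers keeps the aggregate delay load roughly constant as
 * nreaders varies.
 */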
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};
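/*
 * Illustrative mapping (added): for the "rcu" flavor above, the generic
 * hooks used by the writers and readers resolve to the familiar RCU API:
 *
 *	->readlock()/->readunlock()	rcu_read_lock()/rcu_read_unlock()
 *	->deferred_free()		call_rcu(..., rcu_torture_cb)
 *	->sync()/->exp_sync()		synchronize_rcu()/synchronize_rcu_expedited()
 *	->get_state()/->cond_sync()	get_state_synchronize_rcu()/cond_synchronize_rcu()
 *
 * The flavors below fill in the same slots with their own primitives,
 * which is how a single test engine covers rcu, srcu, tasks, and so on.
 */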
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};
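/*
 * Added note on the trivial flavor: synchronize_rcu_trivial() works by
 * migrating the caller onto each online CPU in turn.  Forcing the writer
 * to be scheduled on a CPU implies a context switch there, which for a
 * CONFIG_PREEMPT=n kernel is a quiescent state, so by the time the loop
 * completes every CPU has passed through at least one quiescent state.
 * This is why the implementation is only meaningful without preemption
 * and, as noted above, interacts poorly with CPU hotplug.
 */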
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user must do this manually by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
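/*
 * Usage note (added): when rcutorture is built as a module, the RT
 * throttling workaround above is unavailable, so the equivalent manual
 * step is to disable throttling from userspace before the test, e.g.:
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * and to restore the previous value (typically 950000) afterwards.
 */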
static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
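/*
 * Added note on the pass/fail criterion used above: a boost interval is
 * considered a failure when a single self-posted callback fails to be
 * invoked within roughly test_boost_duration seconds (minus half a
 * second of slack, see rcu_torture_boost_failed()).  With priority
 * boosting working, the low-priority RCU work should be boosted past
 * this real-time spinner well before that deadline.
 */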
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
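/*
 * Added summary of the writer's update-side choices: each pass through
 * the loop above replaces rcu_torture_current and then retires the old
 * element using one of the methods selected into synctype[]:
 *
 *	RTWS_DEF_FREE	->deferred_free()	(e.g. call_rcu())
 *	RTWS_EXP_SYNC	->exp_sync()		(e.g. synchronize_rcu_expedited())
 *	RTWS_COND_GET	->get_state() followed later by ->cond_sync()
 *	RTWS_SYNC	->sync()		(e.g. synchronize_rcu())
 *
 * so a single run exercises asynchronous, expedited, conditional, and
 * synchronous grace-period primitives for the selected flavor.
 */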
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
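/*
 * Illustrative sequence (added): starting from a bare RCU reader, one
 * possible extend/retract episode looks like:
 *
 *	state: RCU  ->  RCU|BH  ->  RCU|BH|PREEMPT  ->  RCU  ->  0
 *
 * Each arrow is one rcutorture_one_extend() call; new protections are
 * always acquired before old ones are dropped, so the element being
 * read remains protected across every transition.
 */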
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
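/*
 * Added note on the BH/IRQ constraint above: local_bh_enable() must not
 * be called with interrupts disabled.  So if a new mask would disable
 * interrupts while dropping a bh-style protection that the old mask
 * held (RCUTORTURE_RDR_BH or _RBH), the bh bits are forced back on,
 * ensuring that bh is never re-enabled inside the irq-disabled region.
 */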
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
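/*
 * Legend (added) for the abbreviated statistics printed below:
 *	rtc		current rcu_torture victim pointer ("ver" becomes
 *			"VER" when rtc is NULL, i.e. no current victim)
 *	tfle		torture freelist empty
 *	rta/rtaf/rtf	allocations / allocation failures / frees
 *	rtmbe		memory-ordering (rtort_mbtest) errors
 *	rtbe		rcu_barrier() test errors
 *	rtbke/rtbre	boost kthread-creation / RT-priority errors
 *	rtbf/rtb	boost failures / boost tests performed
 *	nt		timer-handler reads
 *	barrier		successes/attempts:errors for rcu_barrier() testing
 */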
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld\n",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
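/*
 * Added note: for the stall above to actually provoke a stall warning,
 * stall_cpu must exceed the RCU CPU stall timeout (21 seconds by default,
 * see CONFIG_RCU_CPU_STALL_TIMEOUT), and that warning is the expected,
 * desired output of this test rather than an rcutorture failure.
 */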
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	int rfc_gps;
};
static DEFINE_SPINLOCK(rcu_fwd_lock);
static struct rcu_fwd_cb *rcu_fwd_cb_head;
static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
static long n_launders_cb;
static unsigned long rcu_fwd_startat;
static bool rcu_fwd_emergency_stop;
#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
static struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
static unsigned long rcu_launder_gp_seq_start;

static void rcu_torture_fwd_cb_hist(void)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
		if (n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rcu_fwd_startat);
	gps_old = rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}

/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rcu_fwd_lock, flags);
	rfcpp = rcu_fwd_cb_tail;
	rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
	i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(n_launders_hist))
		i = ARRAY_SIZE(n_launders_hist) - 1;
	n_launders_hist[i].n_launders++;
	n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rcu_fwd_lock, flags);
}
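/*
 * Terminology note (added): a callback is "laundered" each time it is
 * re-posted by rcu_torture_fwd_cb_cr() above, so ->rfc_gps counts how
 * many grace periods a given rcu_fwd_cb has survived.  The histogram
 * buckets record, per tenth of a second (FWD_CBS_HIST_DIV), how many
 * launderings occurred and what the grace-period sequence number was,
 * which is what rcu_torture_fwd_cb_hist() prints after a test.
 */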
1729 */ 1730 static unsigned long rcu_torture_fwd_prog_cbfree(void) 1731 { 1732 unsigned long flags; 1733 unsigned long freed = 0; 1734 struct rcu_fwd_cb *rfcp; 1735 1736 for (;;) { 1737 spin_lock_irqsave(&rcu_fwd_lock, flags); 1738 rfcp = rcu_fwd_cb_head; 1739 if (!rfcp) { 1740 spin_unlock_irqrestore(&rcu_fwd_lock, flags); 1741 break; 1742 } 1743 rcu_fwd_cb_head = rfcp->rfc_next; 1744 if (!rcu_fwd_cb_head) 1745 rcu_fwd_cb_tail = &rcu_fwd_cb_head; 1746 spin_unlock_irqrestore(&rcu_fwd_lock, flags); 1747 kfree(rfcp); 1748 freed++; 1749 rcu_torture_fwd_prog_cond_resched(); 1750 } 1751 return freed; 1752 } 1753 1754 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1755 static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries) 1756 { 1757 unsigned long cver; 1758 unsigned long dur; 1759 struct fwd_cb_state fcs; 1760 unsigned long gps; 1761 int idx; 1762 int sd; 1763 int sd4; 1764 bool selfpropcb = false; 1765 unsigned long stopat; 1766 static DEFINE_TORTURE_RANDOM(trs); 1767 1768 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1769 init_rcu_head_on_stack(&fcs.rh); 1770 selfpropcb = true; 1771 } 1772 1773 /* Tight loop containing cond_resched(). */ 1774 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1775 cur_ops->sync(); /* Later readers see above write. */ 1776 if (selfpropcb) { 1777 WRITE_ONCE(fcs.stop, 0); 1778 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1779 } 1780 cver = READ_ONCE(rcu_torture_current_version); 1781 gps = cur_ops->get_gp_seq(); 1782 sd = cur_ops->stall_dur() + 1; 1783 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1784 dur = sd4 + torture_random(&trs) % (sd - sd4); 1785 WRITE_ONCE(rcu_fwd_startat, jiffies); 1786 stopat = rcu_fwd_startat + dur; 1787 while (time_before(jiffies, stopat) && 1788 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1789 idx = cur_ops->readlock(); 1790 udelay(10); 1791 cur_ops->readunlock(idx); 1792 if (!fwd_progress_need_resched || need_resched()) 1793 rcu_torture_fwd_prog_cond_resched(); 1794 } 1795 (*tested_tries)++; 1796 if (!time_before(jiffies, stopat) && 1797 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1798 (*tested)++; 1799 cver = READ_ONCE(rcu_torture_current_version) - cver; 1800 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1801 WARN_ON(!cver && gps < 2); 1802 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1803 } 1804 if (selfpropcb) { 1805 WRITE_ONCE(fcs.stop, 1); 1806 cur_ops->sync(); /* Wait for running CB to complete. */ 1807 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1808 } 1809 1810 if (selfpropcb) { 1811 WARN_ON(READ_ONCE(fcs.stop) != 2); 1812 destroy_rcu_head_on_stack(&fcs.rh); 1813 } 1814 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1815 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1816 } 1817 1818 /* Carry out call_rcu() forward-progress testing. */ 1819 static void rcu_torture_fwd_prog_cr(void) 1820 { 1821 unsigned long cver; 1822 unsigned long gps; 1823 int i; 1824 long n_launders; 1825 long n_launders_cb_snap; 1826 long n_launders_sa; 1827 long n_max_cbs; 1828 long n_max_gps; 1829 struct rcu_fwd_cb *rfcp; 1830 struct rcu_fwd_cb *rfcpn; 1831 unsigned long stopat; 1832 unsigned long stoppedat; 1833 1834 if (READ_ONCE(rcu_fwd_emergency_stop)) 1835 return; /* Get out of the way quickly, no GP wait! */ 1836 if (!cur_ops->call) 1837 return; /* Can't do call_rcu() fwd prog without ->call. */ 1838 1839 /* Loop continuously posting RCU callbacks. 
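 * Each pass either re-posts ("relaunders") the callback at the head of the
 * list, provided that callback already has a successor queued behind it, or
 * else allocates and posts a brand-new callback, so the total number of
 * callbacks grows only when invocations are not keeping up with this
 * posting loop.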
*/ 1840 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1841 cur_ops->sync(); /* Later readers see above write. */ 1842 WRITE_ONCE(rcu_fwd_startat, jiffies); 1843 stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 1844 n_launders = 0; 1845 n_launders_cb = 0; 1846 n_launders_sa = 0; 1847 n_max_cbs = 0; 1848 n_max_gps = 0; 1849 for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++) 1850 n_launders_hist[i].n_launders = 0; 1851 cver = READ_ONCE(rcu_torture_current_version); 1852 gps = cur_ops->get_gp_seq(); 1853 rcu_launder_gp_seq_start = gps; 1854 while (time_before(jiffies, stopat) && 1855 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1856 rfcp = READ_ONCE(rcu_fwd_cb_head); 1857 rfcpn = NULL; 1858 if (rfcp) 1859 rfcpn = READ_ONCE(rfcp->rfc_next); 1860 if (rfcpn) { 1861 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 1862 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 1863 break; 1864 rcu_fwd_cb_head = rfcpn; 1865 n_launders++; 1866 n_launders_sa++; 1867 } else { 1868 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 1869 if (WARN_ON_ONCE(!rfcp)) { 1870 schedule_timeout_interruptible(1); 1871 continue; 1872 } 1873 n_max_cbs++; 1874 n_launders_sa = 0; 1875 rfcp->rfc_gps = 0; 1876 } 1877 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 1878 rcu_torture_fwd_prog_cond_resched(); 1879 } 1880 stoppedat = jiffies; 1881 n_launders_cb_snap = READ_ONCE(n_launders_cb); 1882 cver = READ_ONCE(rcu_torture_current_version) - cver; 1883 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1884 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 1885 (void)rcu_torture_fwd_prog_cbfree(); 1886 1887 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) { 1888 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 1889 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 1890 __func__, 1891 stoppedat - rcu_fwd_startat, jiffies - stoppedat, 1892 n_launders + n_max_cbs - n_launders_cb_snap, 1893 n_launders, n_launders_sa, 1894 n_max_gps, n_max_cbs, cver, gps); 1895 rcu_torture_fwd_cb_hist(); 1896 } 1897 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 1898 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1899 } 1900 1901 1902 /* 1903 * OOM notifier, but this only prints diagnostic information for the 1904 * current forward-progress test. 1905 */ 1906 static int rcutorture_oom_notify(struct notifier_block *self, 1907 unsigned long notused, void *nfreed) 1908 { 1909 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 1910 __func__); 1911 rcu_torture_fwd_cb_hist(); 1912 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2); 1913 WRITE_ONCE(rcu_fwd_emergency_stop, true); 1914 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 1915 pr_info("%s: Freed %lu RCU callbacks.\n", 1916 __func__, rcu_torture_fwd_prog_cbfree()); 1917 rcu_barrier(); 1918 pr_info("%s: Freed %lu RCU callbacks.\n", 1919 __func__, rcu_torture_fwd_prog_cbfree()); 1920 rcu_barrier(); 1921 pr_info("%s: Freed %lu RCU callbacks.\n", 1922 __func__, rcu_torture_fwd_prog_cbfree()); 1923 smp_mb(); /* Frees before return to avoid redoing OOM. */ 1924 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 1925 pr_info("%s returning after OOM processing.\n", __func__); 1926 return NOTIFY_OK; 1927 } 1928 1929 static struct notifier_block rcutorture_oom_nb = { 1930 .notifier_call = rcutorture_oom_notify 1931 }; 1932 1933 /* Carry out grace-period forward-progress testing. 
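 * The kthread below alternates rcu_torture_fwd_prog_nr() and
 * rcu_torture_fwd_prog_cr() after each holdoff period, and registers
 * rcutorture_oom_nb only for the duration of each pass, so the OOM
 * diagnostics above are printed only while a forward-progress test is
 * actually running.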
*/ 1934 static int rcu_torture_fwd_prog(void *args) 1935 { 1936 int tested = 0; 1937 int tested_tries = 0; 1938 1939 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 1940 rcu_bind_current_to_nocb(); 1941 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 1942 set_user_nice(current, MAX_NICE); 1943 do { 1944 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 1945 WRITE_ONCE(rcu_fwd_emergency_stop, false); 1946 register_oom_notifier(&rcutorture_oom_nb); 1947 rcu_torture_fwd_prog_nr(&tested, &tested_tries); 1948 rcu_torture_fwd_prog_cr(); 1949 unregister_oom_notifier(&rcutorture_oom_nb); 1950 1951 /* Avoid slow periods, better to test when busy. */ 1952 stutter_wait("rcu_torture_fwd_prog"); 1953 } while (!torture_must_stop()); 1954 /* Short runs might not contain a valid forward-progress attempt. */ 1955 WARN_ON(!tested && tested_tries >= 5); 1956 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 1957 torture_kthread_stopping("rcu_torture_fwd_prog"); 1958 return 0; 1959 } 1960 1961 /* If forward-progress checking is requested and feasible, spawn the thread. */ 1962 static int __init rcu_torture_fwd_prog_init(void) 1963 { 1964 if (!fwd_progress) 1965 return 0; /* Not requested, so don't do it. */ 1966 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 1967 cur_ops == &rcu_busted_ops) { 1968 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 1969 return 0; 1970 } 1971 if (stall_cpu > 0) { 1972 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 1973 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 1974 return -EINVAL; /* In module, can fail back to user. */ 1975 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 1976 return 0; 1977 } 1978 if (fwd_progress_holdoff <= 0) 1979 fwd_progress_holdoff = 1; 1980 if (fwd_progress_div <= 0) 1981 fwd_progress_div = 4; 1982 return torture_create_kthread(rcu_torture_fwd_prog, 1983 NULL, fwd_prog_task); 1984 } 1985 1986 /* Callback function for RCU barrier testing. */ 1987 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 1988 { 1989 atomic_inc(&barrier_cbs_invoked); 1990 } 1991 1992 /* kthread function to register callbacks used to test RCU barriers. */ 1993 static int rcu_torture_barrier_cbs(void *arg) 1994 { 1995 long myid = (long)arg; 1996 bool lastphase = false; 1997 bool newphase; 1998 struct rcu_head rcu; 1999 2000 init_rcu_head_on_stack(&rcu); 2001 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2002 set_user_nice(current, MAX_NICE); 2003 do { 2004 wait_event(barrier_cbs_wq[myid], 2005 (newphase = 2006 smp_load_acquire(&barrier_phase)) != lastphase || 2007 torture_must_stop()); 2008 lastphase = newphase; 2009 if (torture_must_stop()) 2010 break; 2011 /* 2012 * The above smp_load_acquire() ensures barrier_phase load 2013 * is ordered before the following ->call(). 2014 */ 2015 local_irq_disable(); /* Just to test no-irq call_rcu(). */ 2016 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2017 local_irq_enable(); 2018 if (atomic_dec_and_test(&barrier_cbs_count)) 2019 wake_up(&barrier_wq); 2020 } while (!torture_must_stop()); 2021 if (cur_ops->cb_barrier != NULL) 2022 cur_ops->cb_barrier(); 2023 destroy_rcu_head_on_stack(&rcu); 2024 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2025 return 0; 2026 } 2027 2028 /* kthread function to drive and coordinate RCU barrier testing.
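 * Each iteration flips barrier_phase with smp_store_release(), which the
 * rcu_torture_barrier_cbs() kthreads above observe via smp_load_acquire()
 * in their wait_event() condition.  Each of those kthreads then posts
 * exactly one callback, the last one to decrement barrier_cbs_count wakes
 * this kthread, and once cur_ops->cb_barrier() returns, every one of those
 * callbacks must have been invoked.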
*/ 2029 static int rcu_torture_barrier(void *arg) 2030 { 2031 int i; 2032 2033 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2034 do { 2035 atomic_set(&barrier_cbs_invoked, 0); 2036 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2037 /* Ensure barrier_phase ordered after prior assignments. */ 2038 smp_store_release(&barrier_phase, !barrier_phase); 2039 for (i = 0; i < n_barrier_cbs; i++) 2040 wake_up(&barrier_cbs_wq[i]); 2041 wait_event(barrier_wq, 2042 atomic_read(&barrier_cbs_count) == 0 || 2043 torture_must_stop()); 2044 if (torture_must_stop()) 2045 break; 2046 n_barrier_attempts++; 2047 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2048 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2049 n_rcu_torture_barrier_error++; 2050 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2051 atomic_read(&barrier_cbs_invoked), 2052 n_barrier_cbs); 2053 WARN_ON_ONCE(1); 2054 } else { 2055 n_barrier_successes++; 2056 } 2057 schedule_timeout_interruptible(HZ / 10); 2058 } while (!torture_must_stop()); 2059 torture_kthread_stopping("rcu_torture_barrier"); 2060 return 0; 2061 } 2062 2063 /* Initialize RCU barrier testing. */ 2064 static int rcu_torture_barrier_init(void) 2065 { 2066 int i; 2067 int ret; 2068 2069 if (n_barrier_cbs <= 0) 2070 return 0; 2071 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2072 pr_alert("%s" TORTURE_FLAG 2073 " Call or barrier ops missing for %s,\n", 2074 torture_type, cur_ops->name); 2075 pr_alert("%s" TORTURE_FLAG 2076 " RCU barrier testing omitted from run.\n", 2077 torture_type); 2078 return 0; 2079 } 2080 atomic_set(&barrier_cbs_count, 0); 2081 atomic_set(&barrier_cbs_invoked, 0); 2082 barrier_cbs_tasks = 2083 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2084 GFP_KERNEL); 2085 barrier_cbs_wq = 2086 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2087 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2088 return -ENOMEM; 2089 for (i = 0; i < n_barrier_cbs; i++) { 2090 init_waitqueue_head(&barrier_cbs_wq[i]); 2091 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2092 (void *)(long)i, 2093 barrier_cbs_tasks[i]); 2094 if (ret) 2095 return ret; 2096 } 2097 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2098 } 2099 2100 /* Clean up after RCU barrier testing. */ 2101 static void rcu_torture_barrier_cleanup(void) 2102 { 2103 int i; 2104 2105 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2106 if (barrier_cbs_tasks != NULL) { 2107 for (i = 0; i < n_barrier_cbs; i++) 2108 torture_stop_kthread(rcu_torture_barrier_cbs, 2109 barrier_cbs_tasks[i]); 2110 kfree(barrier_cbs_tasks); 2111 barrier_cbs_tasks = NULL; 2112 } 2113 if (barrier_cbs_wq != NULL) { 2114 kfree(barrier_cbs_wq); 2115 barrier_cbs_wq = NULL; 2116 } 2117 } 2118 2119 static bool rcu_torture_can_boost(void) 2120 { 2121 static int boost_warn_once; 2122 int prio; 2123 2124 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2125 return false; 2126 2127 prio = rcu_get_gp_kthreads_prio(); 2128 if (!prio) 2129 return false; 2130 2131 if (prio < 2) { 2132 if (boost_warn_once == 1) 2133 return false; 2134 2135 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2136 boost_warn_once = 1; 2137 return false; 2138 } 2139 2140 return true; 2141 } 2142 2143 static enum cpuhp_state rcutor_hp; 2144 2145 static void 2146 rcu_torture_cleanup(void) 2147 { 2148 int firsttime; 2149 int flags = 0; 2150 unsigned long gp_seq = 0; 2151 int i; 2152 2153 if (torture_cleanup_begin()) { 2154 if (cur_ops->cb_barrier != NULL) 2155 cur_ops->cb_barrier(); 2156 return; 2157 } 2158 if (!cur_ops) { 2159 torture_cleanup_end(); 2160 return; 2161 } 2162 2163 rcu_torture_barrier_cleanup(); 2164 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2165 torture_stop_kthread(rcu_torture_stall, stall_task); 2166 torture_stop_kthread(rcu_torture_writer, writer_task); 2167 2168 if (reader_tasks) { 2169 for (i = 0; i < nrealreaders; i++) 2170 torture_stop_kthread(rcu_torture_reader, 2171 reader_tasks[i]); 2172 kfree(reader_tasks); 2173 } 2174 rcu_torture_current = NULL; 2175 2176 if (fakewriter_tasks) { 2177 for (i = 0; i < nfakewriters; i++) { 2178 torture_stop_kthread(rcu_torture_fakewriter, 2179 fakewriter_tasks[i]); 2180 } 2181 kfree(fakewriter_tasks); 2182 fakewriter_tasks = NULL; 2183 } 2184 2185 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2186 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2187 pr_alert("%s: End-test grace-period state: g%lu f%#x\n", 2188 cur_ops->name, gp_seq, flags); 2189 torture_stop_kthread(rcu_torture_stats, stats_task); 2190 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2191 if (rcu_torture_can_boost()) 2192 cpuhp_remove_state(rcutor_hp); 2193 2194 /* 2195 * Wait for all RCU callbacks to fire, then do torture-type-specific 2196 * cleanup operations. 2197 */ 2198 if (cur_ops->cb_barrier != NULL) 2199 cur_ops->cb_barrier(); 2200 if (cur_ops->cleanup != NULL) 2201 cur_ops->cleanup(); 2202 2203 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2204 2205 if (err_segs_recorded) { 2206 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2207 if (rt_read_nsegs == 0) 2208 pr_alert("\t: No segments recorded!!!\n"); 2209 firsttime = 1; 2210 for (i = 0; i < rt_read_nsegs; i++) { 2211 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2212 if (err_segs[i].rt_delay_jiffies != 0) { 2213 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2214 err_segs[i].rt_delay_jiffies); 2215 firsttime = 0; 2216 } 2217 if (err_segs[i].rt_delay_ms != 0) { 2218 pr_cont("%s%ldms", firsttime ? "" : "+", 2219 err_segs[i].rt_delay_ms); 2220 firsttime = 0; 2221 } 2222 if (err_segs[i].rt_delay_us != 0) { 2223 pr_cont("%s%ldus", firsttime ? "" : "+", 2224 err_segs[i].rt_delay_us); 2225 firsttime = 0; 2226 } 2227 pr_cont("%s\n", 2228 err_segs[i].rt_preempted ? "preempted" : ""); 2229 2230 } 2231 } 2232 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2233 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2234 else if (torture_onoff_failures()) 2235 rcu_torture_print_module_parms(cur_ops, 2236 "End of test: RCU_HOTPLUG"); 2237 else 2238 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2239 torture_cleanup_end(); 2240 } 2241 2242 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2243 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2244 { 2245 } 2246 2247 static void rcu_torture_err_cb(struct rcu_head *rhp) 2248 { 2249 /* 2250 * This -might- happen due to race conditions, but is unlikely. 
2251 * The scenario that leads to this happening is that the 2252 * first of the pair of duplicate callbacks is queued, 2253 * someone else starts a grace period that includes that 2254 * callback, then the second of the pair must wait for the 2255 * next grace period. Unlikely, but can happen. If it 2256 * does happen, the debug-objects subsystem won't have splatted. 2257 */ 2258 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2259 } 2260 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2261 2262 /* 2263 * Verify that double-free causes debug-objects to complain, but only 2264 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2265 * cannot be carried out. 2266 */ 2267 static void rcu_test_debug_objects(void) 2268 { 2269 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2270 struct rcu_head rh1; 2271 struct rcu_head rh2; 2272 2273 init_rcu_head_on_stack(&rh1); 2274 init_rcu_head_on_stack(&rh2); 2275 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2276 2277 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2278 preempt_disable(); /* Prevent preemption from interrupting test. */ 2279 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2280 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2281 local_irq_disable(); /* Make it harder to start a new grace period. */ 2282 call_rcu(&rh2, rcu_torture_leak_cb); 2283 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2284 local_irq_enable(); 2285 rcu_read_unlock(); 2286 preempt_enable(); 2287 2288 /* Wait for them all to get done so we can safely return. */ 2289 rcu_barrier(); 2290 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2291 destroy_rcu_head_on_stack(&rh1); 2292 destroy_rcu_head_on_stack(&rh2); 2293 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2294 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2295 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2296 } 2297 2298 static void rcutorture_sync(void) 2299 { 2300 static unsigned long n; 2301 2302 if (cur_ops->sync && !(++n & 0xfff)) 2303 cur_ops->sync(); 2304 } 2305 2306 static int __init 2307 rcu_torture_init(void) 2308 { 2309 long i; 2310 int cpu; 2311 int firsterr = 0; 2312 static struct rcu_torture_ops *torture_ops[] = { 2313 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2314 &busted_srcud_ops, &tasks_ops, &trivial_ops, 2315 }; 2316 2317 if (!torture_init_begin(torture_type, verbose)) 2318 return -EBUSY; 2319 2320 /* Process args and tell the world that the torturer is on the job. 
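 * The loop below matches the torture_type module parameter against each
 * entry in torture_ops[]; an unrecognized type prints the list of valid
 * types and unwinds the initialization with -EINVAL.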
*/ 2321 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2322 cur_ops = torture_ops[i]; 2323 if (strcmp(torture_type, cur_ops->name) == 0) 2324 break; 2325 } 2326 if (i == ARRAY_SIZE(torture_ops)) { 2327 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2328 torture_type); 2329 pr_alert("rcu-torture types:"); 2330 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2331 pr_cont(" %s", torture_ops[i]->name); 2332 pr_cont("\n"); 2333 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2334 firsterr = -EINVAL; 2335 cur_ops = NULL; 2336 goto unwind; 2337 } 2338 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2339 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2340 fqs_duration = 0; 2341 } 2342 if (cur_ops->init) 2343 cur_ops->init(); 2344 2345 if (nreaders >= 0) { 2346 nrealreaders = nreaders; 2347 } else { 2348 nrealreaders = num_online_cpus() - 2 - nreaders; 2349 if (nrealreaders <= 0) 2350 nrealreaders = 1; 2351 } 2352 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2353 2354 /* Set up the freelist. */ 2355 2356 INIT_LIST_HEAD(&rcu_torture_freelist); 2357 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2358 rcu_tortures[i].rtort_mbtest = 0; 2359 list_add_tail(&rcu_tortures[i].rtort_free, 2360 &rcu_torture_freelist); 2361 } 2362 2363 /* Initialize the statistics so that each run gets its own numbers. */ 2364 2365 rcu_torture_current = NULL; 2366 rcu_torture_current_version = 0; 2367 atomic_set(&n_rcu_torture_alloc, 0); 2368 atomic_set(&n_rcu_torture_alloc_fail, 0); 2369 atomic_set(&n_rcu_torture_free, 0); 2370 atomic_set(&n_rcu_torture_mberror, 0); 2371 atomic_set(&n_rcu_torture_error, 0); 2372 n_rcu_torture_barrier_error = 0; 2373 n_rcu_torture_boost_ktrerror = 0; 2374 n_rcu_torture_boost_rterror = 0; 2375 n_rcu_torture_boost_failure = 0; 2376 n_rcu_torture_boosts = 0; 2377 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2378 atomic_set(&rcu_torture_wcount[i], 0); 2379 for_each_possible_cpu(cpu) { 2380 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2381 per_cpu(rcu_torture_count, cpu)[i] = 0; 2382 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2383 } 2384 } 2385 err_segs_recorded = 0; 2386 rt_read_nsegs = 0; 2387 2388 /* Start up the kthreads. 
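 * (For example, with nreaders left at its default of -1 on an 8-CPU system,
 * the code above computed nrealreaders = 8 - 2 - (-1) = 7, so seven reader
 * kthreads are created below.)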
*/ 2389 2390 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2391 writer_task); 2392 if (firsterr) 2393 goto unwind; 2394 if (nfakewriters > 0) { 2395 fakewriter_tasks = kcalloc(nfakewriters, 2396 sizeof(fakewriter_tasks[0]), 2397 GFP_KERNEL); 2398 if (fakewriter_tasks == NULL) { 2399 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2400 firsterr = -ENOMEM; 2401 goto unwind; 2402 } 2403 } 2404 for (i = 0; i < nfakewriters; i++) { 2405 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2406 NULL, fakewriter_tasks[i]); 2407 if (firsterr) 2408 goto unwind; 2409 } 2410 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2411 GFP_KERNEL); 2412 if (reader_tasks == NULL) { 2413 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2414 firsterr = -ENOMEM; 2415 goto unwind; 2416 } 2417 for (i = 0; i < nrealreaders; i++) { 2418 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2419 reader_tasks[i]); 2420 if (firsterr) 2421 goto unwind; 2422 } 2423 if (stat_interval > 0) { 2424 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2425 stats_task); 2426 if (firsterr) 2427 goto unwind; 2428 } 2429 if (test_no_idle_hz && shuffle_interval > 0) { 2430 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2431 if (firsterr) 2432 goto unwind; 2433 } 2434 if (stutter < 0) 2435 stutter = 0; 2436 if (stutter) { 2437 int t; 2438 2439 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2440 firsterr = torture_stutter_init(stutter * HZ, t); 2441 if (firsterr) 2442 goto unwind; 2443 } 2444 if (fqs_duration < 0) 2445 fqs_duration = 0; 2446 if (fqs_duration) { 2447 /* Create the fqs thread */ 2448 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2449 fqs_task); 2450 if (firsterr) 2451 goto unwind; 2452 } 2453 if (test_boost_interval < 1) 2454 test_boost_interval = 1; 2455 if (test_boost_duration < 2) 2456 test_boost_duration = 2; 2457 if (rcu_torture_can_boost()) { 2458 2459 boost_starttime = jiffies + test_boost_interval * HZ; 2460 2461 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2462 rcutorture_booster_init, 2463 rcutorture_booster_cleanup); 2464 if (firsterr < 0) 2465 goto unwind; 2466 rcutor_hp = firsterr; 2467 } 2468 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2469 if (firsterr) 2470 goto unwind; 2471 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2472 rcutorture_sync); 2473 if (firsterr) 2474 goto unwind; 2475 firsterr = rcu_torture_stall_init(); 2476 if (firsterr) 2477 goto unwind; 2478 firsterr = rcu_torture_fwd_prog_init(); 2479 if (firsterr) 2480 goto unwind; 2481 firsterr = rcu_torture_barrier_init(); 2482 if (firsterr) 2483 goto unwind; 2484 if (object_debug) 2485 rcu_test_debug_objects(); 2486 torture_init_end(); 2487 return 0; 2488 2489 unwind: 2490 torture_init_end(); 2491 rcu_torture_cleanup(); 2492 return firsterr; 2493 } 2494 2495 module_init(rcu_torture_init); 2496 module_exit(rcu_torture_cleanup); 2497
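
/*
 * Editor's usage note (illustrative, not part of the original source):
 * when rcutorture is built as a module, an interactive run might look like
 *
 *	modprobe rcutorture torture_type=srcu fwd_progress=1 stat_interval=30
 *	... let the test run for a while ...
 *	rmmod rcutorture	# A clean run prints "End of test: SUCCESS".
 *
 * The parameter names are the module parameters defined earlier in this
 * file; most automated testing instead drives this module through the
 * scripts under tools/testing/selftests/rcutorture/.
 */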