1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.txt 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 49 #include "rcu.h" 50 51 MODULE_LICENSE("GPL"); 52 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 53 54 55 /* Bits for ->extendables field, extendables param, and related definitions. */ 56 #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ 57 #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) 58 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 59 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 60 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 61 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 62 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 63 #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ 64 #define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ 65 #define RCUTORTURE_MAX_EXTEND \ 66 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 67 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 69 /* Must be power of two minus one. */ 70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 71 72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 73 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 74 torture_param(int, fqs_duration, 0, 75 "Duration of fqs bursts (us), 0 to disable"); 76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); 79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 80 torture_param(int, fwd_progress_holdoff, 60, 81 "Time between forward-progress tests (s)"); 82 torture_param(bool, fwd_progress_need_resched, 1, 83 "Hide cond_resched() behind need_resched()"); 84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 86 torture_param(bool, gp_normal, false, 87 "Use normal (non-expedited) GP wait primitives"); 88 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 89 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 90 torture_param(int, n_barrier_cbs, 0, 91 "# of callbacks/kthreads for barrier testing"); 92 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 93 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 94 torture_param(int, object_debug, 0, 95 "Enable debug-object double call_rcu() testing"); 96 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 97 torture_param(int, onoff_interval, 0, 98 "Time between CPU hotplugs (jiffies), 0=disable"); 99 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 100 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 101 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 102 torture_param(int, stall_cpu_holdoff, 10, 103 "Time to wait before starting stall (s)."); 104 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 105 torture_param(int, stat_interval, 60, 106 "Number of seconds between stats printk()s"); 107 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 108 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 109 torture_param(int, test_boost_duration, 4, 110 "Duration of each boost test, seconds."); 111 torture_param(int, test_boost_interval, 7, 112 "Interval between boost tests, seconds."); 113 torture_param(bool, test_no_idle_hz, true, 114 "Test support for tickless idle CPUs"); 115 torture_param(int, verbose, 1, 116 "Enable verbose debugging printk()s"); 117 118 static char *torture_type = "rcu"; 119 module_param(torture_type, charp, 0444); 120 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 121 122 static int nrealreaders; 123 static struct task_struct *writer_task; 124 static struct task_struct **fakewriter_tasks; 125 static struct task_struct **reader_tasks; 126 static struct task_struct *stats_task; 127 static struct task_struct *fqs_task; 128 static struct task_struct *boost_tasks[NR_CPUS]; 129 static struct task_struct *stall_task; 130 static struct task_struct *fwd_prog_task; 131 static struct task_struct **barrier_cbs_tasks; 132 static struct task_struct *barrier_task; 133 134 #define RCU_TORTURE_PIPE_LEN 10 135 136 struct rcu_torture { 137 struct rcu_head rtort_rcu; 138 int rtort_pipe_count; 139 struct list_head rtort_free; 140 int rtort_mbtest; 141 }; 142 143 static LIST_HEAD(rcu_torture_freelist); 144 static struct rcu_torture __rcu *rcu_torture_current; 145 static unsigned long rcu_torture_current_version; 146 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 147 static DEFINE_SPINLOCK(rcu_torture_lock); 148 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 149 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 150 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 151 static atomic_t n_rcu_torture_alloc; 152 static atomic_t n_rcu_torture_alloc_fail; 153 static atomic_t n_rcu_torture_free; 154 static atomic_t n_rcu_torture_mberror; 155 static atomic_t n_rcu_torture_error; 156 static long n_rcu_torture_barrier_error; 157 static long n_rcu_torture_boost_ktrerror; 158 static long n_rcu_torture_boost_rterror; 159 static long n_rcu_torture_boost_failure; 160 static long n_rcu_torture_boosts; 161 static atomic_long_t n_rcu_torture_timers; 162 static long n_barrier_attempts; 163 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 164 static struct list_head rcu_torture_removed; 165 static unsigned long shutdown_jiffies; 166 167 static int rcu_torture_writer_state; 168 #define RTWS_FIXED_DELAY 0 169 #define RTWS_DELAY 1 170 #define RTWS_REPLACE 2 171 #define RTWS_DEF_FREE 3 172 #define RTWS_EXP_SYNC 4 173 #define RTWS_COND_GET 5 174 #define RTWS_COND_SYNC 6 175 #define RTWS_SYNC 7 176 #define RTWS_STUTTER 8 177 #define RTWS_STOPPING 9 178 static const char * const rcu_torture_writer_state_names[] = { 179 "RTWS_FIXED_DELAY", 180 "RTWS_DELAY", 181 "RTWS_REPLACE", 182 "RTWS_DEF_FREE", 183 "RTWS_EXP_SYNC", 184 "RTWS_COND_GET", 185 "RTWS_COND_SYNC", 186 "RTWS_SYNC", 187 "RTWS_STUTTER", 188 "RTWS_STOPPING", 189 }; 190 191 /* Record reader segment types and duration for first failing read. */ 192 struct rt_read_seg { 193 int rt_readstate; 194 unsigned long rt_delay_jiffies; 195 unsigned long rt_delay_ms; 196 unsigned long rt_delay_us; 197 bool rt_preempted; 198 }; 199 static int err_segs_recorded; 200 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 201 static int rt_read_nsegs; 202 203 static const char *rcu_torture_writer_state_getname(void) 204 { 205 unsigned int i = READ_ONCE(rcu_torture_writer_state); 206 207 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 208 return "???"; 209 return rcu_torture_writer_state_names[i]; 210 } 211 212 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) 213 #define rcu_can_boost() 1 214 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 215 #define rcu_can_boost() 0 216 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 217 218 #ifdef CONFIG_RCU_TRACE 219 static u64 notrace rcu_trace_clock_local(void) 220 { 221 u64 ts = trace_clock_local(); 222 223 (void)do_div(ts, NSEC_PER_USEC); 224 return ts; 225 } 226 #else /* #ifdef CONFIG_RCU_TRACE */ 227 static u64 notrace rcu_trace_clock_local(void) 228 { 229 return 0ULL; 230 } 231 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 232 233 /* 234 * Stop aggressive CPU-hog tests a bit before the end of the test in order 235 * to avoid interfering with test shutdown. 236 */ 237 static bool shutdown_time_arrived(void) 238 { 239 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 240 } 241 242 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 243 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 244 /* and boost task create/destroy. */ 245 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 246 static bool barrier_phase; /* Test phase. */ 247 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 248 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 249 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 250 251 static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 252 253 /* 254 * Allocate an element from the rcu_tortures pool. 255 */ 256 static struct rcu_torture * 257 rcu_torture_alloc(void) 258 { 259 struct list_head *p; 260 261 spin_lock_bh(&rcu_torture_lock); 262 if (list_empty(&rcu_torture_freelist)) { 263 atomic_inc(&n_rcu_torture_alloc_fail); 264 spin_unlock_bh(&rcu_torture_lock); 265 return NULL; 266 } 267 atomic_inc(&n_rcu_torture_alloc); 268 p = rcu_torture_freelist.next; 269 list_del_init(p); 270 spin_unlock_bh(&rcu_torture_lock); 271 return container_of(p, struct rcu_torture, rtort_free); 272 } 273 274 /* 275 * Free an element to the rcu_tortures pool. 276 */ 277 static void 278 rcu_torture_free(struct rcu_torture *p) 279 { 280 atomic_inc(&n_rcu_torture_free); 281 spin_lock_bh(&rcu_torture_lock); 282 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 283 spin_unlock_bh(&rcu_torture_lock); 284 } 285 286 /* 287 * Operations vector for selecting different types of tests. 288 */ 289 290 struct rcu_torture_ops { 291 int ttype; 292 void (*init)(void); 293 void (*cleanup)(void); 294 int (*readlock)(void); 295 void (*read_delay)(struct torture_random_state *rrsp, 296 struct rt_read_seg *rtrsp); 297 void (*readunlock)(int idx); 298 unsigned long (*get_gp_seq)(void); 299 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 300 void (*deferred_free)(struct rcu_torture *p); 301 void (*sync)(void); 302 void (*exp_sync)(void); 303 unsigned long (*get_state)(void); 304 void (*cond_sync)(unsigned long oldstate); 305 call_rcu_func_t call; 306 void (*cb_barrier)(void); 307 void (*fqs)(void); 308 void (*stats)(void); 309 int (*stall_dur)(void); 310 int irq_capable; 311 int can_boost; 312 int extendables; 313 int slow_gps; 314 const char *name; 315 }; 316 317 static struct rcu_torture_ops *cur_ops; 318 319 /* 320 * Definitions for rcu torture testing. 321 */ 322 323 static int rcu_torture_read_lock(void) __acquires(RCU) 324 { 325 rcu_read_lock(); 326 return 0; 327 } 328 329 static void 330 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 331 { 332 unsigned long started; 333 unsigned long completed; 334 const unsigned long shortdelay_us = 200; 335 unsigned long longdelay_ms = 300; 336 unsigned long long ts; 337 338 /* We want a short delay sometimes to make a reader delay the grace 339 * period, and we want a long delay occasionally to trigger 340 * force_quiescent_state. */ 341 342 if (!READ_ONCE(rcu_fwd_cb_nodelay) && 343 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 344 started = cur_ops->get_gp_seq(); 345 ts = rcu_trace_clock_local(); 346 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 347 longdelay_ms = 5; /* Avoid triggering BH limits. */ 348 mdelay(longdelay_ms); 349 rtrsp->rt_delay_ms = longdelay_ms; 350 completed = cur_ops->get_gp_seq(); 351 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 352 started, completed); 353 } 354 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 355 udelay(shortdelay_us); 356 rtrsp->rt_delay_us = shortdelay_us; 357 } 358 if (!preempt_count() && 359 !(torture_random(rrsp) % (nrealreaders * 500))) { 360 torture_preempt_schedule(); /* QS only if preemptible. */ 361 rtrsp->rt_preempted = true; 362 } 363 } 364 365 static void rcu_torture_read_unlock(int idx) __releases(RCU) 366 { 367 rcu_read_unlock(); 368 } 369 370 /* 371 * Update callback in the pipe. This should be invoked after a grace period. 372 */ 373 static bool 374 rcu_torture_pipe_update_one(struct rcu_torture *rp) 375 { 376 int i; 377 378 i = READ_ONCE(rp->rtort_pipe_count); 379 if (i > RCU_TORTURE_PIPE_LEN) 380 i = RCU_TORTURE_PIPE_LEN; 381 atomic_inc(&rcu_torture_wcount[i]); 382 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 383 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 384 rp->rtort_mbtest = 0; 385 return true; 386 } 387 return false; 388 } 389 390 /* 391 * Update all callbacks in the pipe. Suitable for synchronous grace-period 392 * primitives. 393 */ 394 static void 395 rcu_torture_pipe_update(struct rcu_torture *old_rp) 396 { 397 struct rcu_torture *rp; 398 struct rcu_torture *rp1; 399 400 if (old_rp) 401 list_add(&old_rp->rtort_free, &rcu_torture_removed); 402 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 403 if (rcu_torture_pipe_update_one(rp)) { 404 list_del(&rp->rtort_free); 405 rcu_torture_free(rp); 406 } 407 } 408 } 409 410 static void 411 rcu_torture_cb(struct rcu_head *p) 412 { 413 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 414 415 if (torture_must_stop_irq()) { 416 /* Test is ending, just drop callbacks on the floor. */ 417 /* The next initialization will pick up the pieces. */ 418 return; 419 } 420 if (rcu_torture_pipe_update_one(rp)) 421 rcu_torture_free(rp); 422 else 423 cur_ops->deferred_free(rp); 424 } 425 426 static unsigned long rcu_no_completed(void) 427 { 428 return 0; 429 } 430 431 static void rcu_torture_deferred_free(struct rcu_torture *p) 432 { 433 call_rcu(&p->rtort_rcu, rcu_torture_cb); 434 } 435 436 static void rcu_sync_torture_init(void) 437 { 438 INIT_LIST_HEAD(&rcu_torture_removed); 439 } 440 441 static struct rcu_torture_ops rcu_ops = { 442 .ttype = RCU_FLAVOR, 443 .init = rcu_sync_torture_init, 444 .readlock = rcu_torture_read_lock, 445 .read_delay = rcu_read_delay, 446 .readunlock = rcu_torture_read_unlock, 447 .get_gp_seq = rcu_get_gp_seq, 448 .gp_diff = rcu_seq_diff, 449 .deferred_free = rcu_torture_deferred_free, 450 .sync = synchronize_rcu, 451 .exp_sync = synchronize_rcu_expedited, 452 .get_state = get_state_synchronize_rcu, 453 .cond_sync = cond_synchronize_rcu, 454 .call = call_rcu, 455 .cb_barrier = rcu_barrier, 456 .fqs = rcu_force_quiescent_state, 457 .stats = NULL, 458 .stall_dur = rcu_jiffies_till_stall_check, 459 .irq_capable = 1, 460 .can_boost = rcu_can_boost(), 461 .extendables = RCUTORTURE_MAX_EXTEND, 462 .name = "rcu" 463 }; 464 465 /* 466 * Don't even think about trying any of these in real life!!! 467 * The names includes "busted", and they really means it! 468 * The only purpose of these functions is to provide a buggy RCU 469 * implementation to make sure that rcutorture correctly emits 470 * buggy-RCU error messages. 471 */ 472 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 473 { 474 /* This is a deliberate bug for testing purposes only! */ 475 rcu_torture_cb(&p->rtort_rcu); 476 } 477 478 static void synchronize_rcu_busted(void) 479 { 480 /* This is a deliberate bug for testing purposes only! */ 481 } 482 483 static void 484 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 485 { 486 /* This is a deliberate bug for testing purposes only! */ 487 func(head); 488 } 489 490 static struct rcu_torture_ops rcu_busted_ops = { 491 .ttype = INVALID_RCU_FLAVOR, 492 .init = rcu_sync_torture_init, 493 .readlock = rcu_torture_read_lock, 494 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 495 .readunlock = rcu_torture_read_unlock, 496 .get_gp_seq = rcu_no_completed, 497 .deferred_free = rcu_busted_torture_deferred_free, 498 .sync = synchronize_rcu_busted, 499 .exp_sync = synchronize_rcu_busted, 500 .call = call_rcu_busted, 501 .cb_barrier = NULL, 502 .fqs = NULL, 503 .stats = NULL, 504 .irq_capable = 1, 505 .name = "busted" 506 }; 507 508 /* 509 * Definitions for srcu torture testing. 510 */ 511 512 DEFINE_STATIC_SRCU(srcu_ctl); 513 static struct srcu_struct srcu_ctld; 514 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 515 516 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 517 { 518 return srcu_read_lock(srcu_ctlp); 519 } 520 521 static void 522 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 523 { 524 long delay; 525 const long uspertick = 1000000 / HZ; 526 const long longdelay = 10; 527 528 /* We want there to be long-running readers, but not all the time. */ 529 530 delay = torture_random(rrsp) % 531 (nrealreaders * 2 * longdelay * uspertick); 532 if (!delay && in_task()) { 533 schedule_timeout_interruptible(longdelay); 534 rtrsp->rt_delay_jiffies = longdelay; 535 } else { 536 rcu_read_delay(rrsp, rtrsp); 537 } 538 } 539 540 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 541 { 542 srcu_read_unlock(srcu_ctlp, idx); 543 } 544 545 static unsigned long srcu_torture_completed(void) 546 { 547 return srcu_batches_completed(srcu_ctlp); 548 } 549 550 static void srcu_torture_deferred_free(struct rcu_torture *rp) 551 { 552 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 553 } 554 555 static void srcu_torture_synchronize(void) 556 { 557 synchronize_srcu(srcu_ctlp); 558 } 559 560 static void srcu_torture_call(struct rcu_head *head, 561 rcu_callback_t func) 562 { 563 call_srcu(srcu_ctlp, head, func); 564 } 565 566 static void srcu_torture_barrier(void) 567 { 568 srcu_barrier(srcu_ctlp); 569 } 570 571 static void srcu_torture_stats(void) 572 { 573 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 574 } 575 576 static void srcu_torture_synchronize_expedited(void) 577 { 578 synchronize_srcu_expedited(srcu_ctlp); 579 } 580 581 static struct rcu_torture_ops srcu_ops = { 582 .ttype = SRCU_FLAVOR, 583 .init = rcu_sync_torture_init, 584 .readlock = srcu_torture_read_lock, 585 .read_delay = srcu_read_delay, 586 .readunlock = srcu_torture_read_unlock, 587 .get_gp_seq = srcu_torture_completed, 588 .deferred_free = srcu_torture_deferred_free, 589 .sync = srcu_torture_synchronize, 590 .exp_sync = srcu_torture_synchronize_expedited, 591 .call = srcu_torture_call, 592 .cb_barrier = srcu_torture_barrier, 593 .stats = srcu_torture_stats, 594 .irq_capable = 1, 595 .name = "srcu" 596 }; 597 598 static void srcu_torture_init(void) 599 { 600 rcu_sync_torture_init(); 601 WARN_ON(init_srcu_struct(&srcu_ctld)); 602 srcu_ctlp = &srcu_ctld; 603 } 604 605 static void srcu_torture_cleanup(void) 606 { 607 cleanup_srcu_struct(&srcu_ctld); 608 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 609 } 610 611 /* As above, but dynamically allocated. */ 612 static struct rcu_torture_ops srcud_ops = { 613 .ttype = SRCU_FLAVOR, 614 .init = srcu_torture_init, 615 .cleanup = srcu_torture_cleanup, 616 .readlock = srcu_torture_read_lock, 617 .read_delay = srcu_read_delay, 618 .readunlock = srcu_torture_read_unlock, 619 .get_gp_seq = srcu_torture_completed, 620 .deferred_free = srcu_torture_deferred_free, 621 .sync = srcu_torture_synchronize, 622 .exp_sync = srcu_torture_synchronize_expedited, 623 .call = srcu_torture_call, 624 .cb_barrier = srcu_torture_barrier, 625 .stats = srcu_torture_stats, 626 .irq_capable = 1, 627 .name = "srcud" 628 }; 629 630 /* As above, but broken due to inappropriate reader extension. */ 631 static struct rcu_torture_ops busted_srcud_ops = { 632 .ttype = SRCU_FLAVOR, 633 .init = srcu_torture_init, 634 .cleanup = srcu_torture_cleanup, 635 .readlock = srcu_torture_read_lock, 636 .read_delay = rcu_read_delay, 637 .readunlock = srcu_torture_read_unlock, 638 .get_gp_seq = srcu_torture_completed, 639 .deferred_free = srcu_torture_deferred_free, 640 .sync = srcu_torture_synchronize, 641 .exp_sync = srcu_torture_synchronize_expedited, 642 .call = srcu_torture_call, 643 .cb_barrier = srcu_torture_barrier, 644 .stats = srcu_torture_stats, 645 .irq_capable = 1, 646 .extendables = RCUTORTURE_MAX_EXTEND, 647 .name = "busted_srcud" 648 }; 649 650 /* 651 * Definitions for RCU-tasks torture testing. 652 */ 653 654 static int tasks_torture_read_lock(void) 655 { 656 return 0; 657 } 658 659 static void tasks_torture_read_unlock(int idx) 660 { 661 } 662 663 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 664 { 665 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 666 } 667 668 static void synchronize_rcu_mult_test(void) 669 { 670 synchronize_rcu_mult(call_rcu_tasks, call_rcu); 671 } 672 673 static struct rcu_torture_ops tasks_ops = { 674 .ttype = RCU_TASKS_FLAVOR, 675 .init = rcu_sync_torture_init, 676 .readlock = tasks_torture_read_lock, 677 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 678 .readunlock = tasks_torture_read_unlock, 679 .get_gp_seq = rcu_no_completed, 680 .deferred_free = rcu_tasks_torture_deferred_free, 681 .sync = synchronize_rcu_tasks, 682 .exp_sync = synchronize_rcu_mult_test, 683 .call = call_rcu_tasks, 684 .cb_barrier = rcu_barrier_tasks, 685 .fqs = NULL, 686 .stats = NULL, 687 .irq_capable = 1, 688 .slow_gps = 1, 689 .name = "tasks" 690 }; 691 692 /* 693 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 694 * This implementation does not necessarily work well with CPU hotplug. 695 */ 696 697 static void synchronize_rcu_trivial(void) 698 { 699 int cpu; 700 701 for_each_online_cpu(cpu) { 702 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 703 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 704 } 705 } 706 707 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 708 { 709 preempt_disable(); 710 return 0; 711 } 712 713 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 714 { 715 preempt_enable(); 716 } 717 718 static struct rcu_torture_ops trivial_ops = { 719 .ttype = RCU_TRIVIAL_FLAVOR, 720 .init = rcu_sync_torture_init, 721 .readlock = rcu_torture_read_lock_trivial, 722 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 723 .readunlock = rcu_torture_read_unlock_trivial, 724 .get_gp_seq = rcu_no_completed, 725 .sync = synchronize_rcu_trivial, 726 .exp_sync = synchronize_rcu_trivial, 727 .fqs = NULL, 728 .stats = NULL, 729 .irq_capable = 1, 730 .name = "trivial" 731 }; 732 733 /* 734 * Definitions for rude RCU-tasks torture testing. 735 */ 736 737 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 738 { 739 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 740 } 741 742 static struct rcu_torture_ops tasks_rude_ops = { 743 .ttype = RCU_TASKS_RUDE_FLAVOR, 744 .init = rcu_sync_torture_init, 745 .readlock = rcu_torture_read_lock_trivial, 746 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 747 .readunlock = rcu_torture_read_unlock_trivial, 748 .get_gp_seq = rcu_no_completed, 749 .deferred_free = rcu_tasks_rude_torture_deferred_free, 750 .sync = synchronize_rcu_tasks_rude, 751 .exp_sync = synchronize_rcu_tasks_rude, 752 .call = call_rcu_tasks_rude, 753 .cb_barrier = rcu_barrier_tasks_rude, 754 .fqs = NULL, 755 .stats = NULL, 756 .irq_capable = 1, 757 .name = "tasks-rude" 758 }; 759 760 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 761 { 762 if (!cur_ops->gp_diff) 763 return new - old; 764 return cur_ops->gp_diff(new, old); 765 } 766 767 static bool __maybe_unused torturing_tasks(void) 768 { 769 return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops; 770 } 771 772 /* 773 * RCU torture priority-boost testing. Runs one real-time thread per 774 * CPU for moderate bursts, repeatedly registering RCU callbacks and 775 * spinning waiting for them to be invoked. If a given callback takes 776 * too long to be invoked, we assume that priority inversion has occurred. 777 */ 778 779 struct rcu_boost_inflight { 780 struct rcu_head rcu; 781 int inflight; 782 }; 783 784 static void rcu_torture_boost_cb(struct rcu_head *head) 785 { 786 struct rcu_boost_inflight *rbip = 787 container_of(head, struct rcu_boost_inflight, rcu); 788 789 /* Ensure RCU-core accesses precede clearing ->inflight */ 790 smp_store_release(&rbip->inflight, 0); 791 } 792 793 static int old_rt_runtime = -1; 794 795 static void rcu_torture_disable_rt_throttle(void) 796 { 797 /* 798 * Disable RT throttling so that rcutorture's boost threads don't get 799 * throttled. Only possible if rcutorture is built-in otherwise the 800 * user should manually do this by setting the sched_rt_period_us and 801 * sched_rt_runtime sysctls. 802 */ 803 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 804 return; 805 806 old_rt_runtime = sysctl_sched_rt_runtime; 807 sysctl_sched_rt_runtime = -1; 808 } 809 810 static void rcu_torture_enable_rt_throttle(void) 811 { 812 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 813 return; 814 815 sysctl_sched_rt_runtime = old_rt_runtime; 816 old_rt_runtime = -1; 817 } 818 819 static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) 820 { 821 if (end - start > test_boost_duration * HZ - HZ / 2) { 822 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 823 n_rcu_torture_boost_failure++; 824 825 return true; /* failed */ 826 } 827 828 return false; /* passed */ 829 } 830 831 static int rcu_torture_boost(void *arg) 832 { 833 unsigned long call_rcu_time; 834 unsigned long endtime; 835 unsigned long oldstarttime; 836 struct rcu_boost_inflight rbi = { .inflight = 0 }; 837 struct sched_param sp; 838 839 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 840 841 /* Set real-time priority. */ 842 sp.sched_priority = 1; 843 if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { 844 VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); 845 n_rcu_torture_boost_rterror++; 846 } 847 848 init_rcu_head_on_stack(&rbi.rcu); 849 /* Each pass through the following loop does one boost-test cycle. */ 850 do { 851 /* Track if the test failed already in this test interval? */ 852 bool failed = false; 853 854 /* Increment n_rcu_torture_boosts once per boost-test */ 855 while (!kthread_should_stop()) { 856 if (mutex_trylock(&boost_mutex)) { 857 n_rcu_torture_boosts++; 858 mutex_unlock(&boost_mutex); 859 break; 860 } 861 schedule_timeout_uninterruptible(1); 862 } 863 if (kthread_should_stop()) 864 goto checkwait; 865 866 /* Wait for the next test interval. */ 867 oldstarttime = boost_starttime; 868 while (ULONG_CMP_LT(jiffies, oldstarttime)) { 869 schedule_timeout_interruptible(oldstarttime - jiffies); 870 stutter_wait("rcu_torture_boost"); 871 if (torture_must_stop()) 872 goto checkwait; 873 } 874 875 /* Do one boost-test interval. */ 876 endtime = oldstarttime + test_boost_duration * HZ; 877 call_rcu_time = jiffies; 878 while (ULONG_CMP_LT(jiffies, endtime)) { 879 /* If we don't have a callback in flight, post one. */ 880 if (!smp_load_acquire(&rbi.inflight)) { 881 /* RCU core before ->inflight = 1. */ 882 smp_store_release(&rbi.inflight, 1); 883 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 884 /* Check if the boost test failed */ 885 failed = failed || 886 rcu_torture_boost_failed(call_rcu_time, 887 jiffies); 888 call_rcu_time = jiffies; 889 } 890 stutter_wait("rcu_torture_boost"); 891 if (torture_must_stop()) 892 goto checkwait; 893 } 894 895 /* 896 * If boost never happened, then inflight will always be 1, in 897 * this case the boost check would never happen in the above 898 * loop so do another one here. 899 */ 900 if (!failed && smp_load_acquire(&rbi.inflight)) 901 rcu_torture_boost_failed(call_rcu_time, jiffies); 902 903 /* 904 * Set the start time of the next test interval. 905 * Yes, this is vulnerable to long delays, but such 906 * delays simply cause a false negative for the next 907 * interval. Besides, we are running at RT priority, 908 * so delays should be relatively rare. 909 */ 910 while (oldstarttime == boost_starttime && 911 !kthread_should_stop()) { 912 if (mutex_trylock(&boost_mutex)) { 913 boost_starttime = jiffies + 914 test_boost_interval * HZ; 915 mutex_unlock(&boost_mutex); 916 break; 917 } 918 schedule_timeout_uninterruptible(1); 919 } 920 921 /* Go do the stutter. */ 922 checkwait: stutter_wait("rcu_torture_boost"); 923 } while (!torture_must_stop()); 924 925 /* Clean up and exit. */ 926 while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { 927 torture_shutdown_absorb("rcu_torture_boost"); 928 schedule_timeout_uninterruptible(1); 929 } 930 destroy_rcu_head_on_stack(&rbi.rcu); 931 torture_kthread_stopping("rcu_torture_boost"); 932 return 0; 933 } 934 935 /* 936 * RCU torture force-quiescent-state kthread. Repeatedly induces 937 * bursts of calls to force_quiescent_state(), increasing the probability 938 * of occurrence of some important types of race conditions. 939 */ 940 static int 941 rcu_torture_fqs(void *arg) 942 { 943 unsigned long fqs_resume_time; 944 int fqs_burst_remaining; 945 946 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 947 do { 948 fqs_resume_time = jiffies + fqs_stutter * HZ; 949 while (ULONG_CMP_LT(jiffies, fqs_resume_time) && 950 !kthread_should_stop()) { 951 schedule_timeout_interruptible(1); 952 } 953 fqs_burst_remaining = fqs_duration; 954 while (fqs_burst_remaining > 0 && 955 !kthread_should_stop()) { 956 cur_ops->fqs(); 957 udelay(fqs_holdoff); 958 fqs_burst_remaining -= fqs_holdoff; 959 } 960 stutter_wait("rcu_torture_fqs"); 961 } while (!torture_must_stop()); 962 torture_kthread_stopping("rcu_torture_fqs"); 963 return 0; 964 } 965 966 /* 967 * RCU torture writer kthread. Repeatedly substitutes a new structure 968 * for that pointed to by rcu_torture_current, freeing the old structure 969 * after a series of grace periods (the "pipeline"). 970 */ 971 static int 972 rcu_torture_writer(void *arg) 973 { 974 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 975 int expediting = 0; 976 unsigned long gp_snap; 977 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; 978 bool gp_sync1 = gp_sync; 979 int i; 980 struct rcu_torture *rp; 981 struct rcu_torture *old_rp; 982 static DEFINE_TORTURE_RANDOM(rand); 983 int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, 984 RTWS_COND_GET, RTWS_SYNC }; 985 int nsynctypes = 0; 986 987 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 988 if (!can_expedite) 989 pr_alert("%s" TORTURE_FLAG 990 " GP expediting controlled from boot/sysfs for %s.\n", 991 torture_type, cur_ops->name); 992 993 /* Initialize synctype[] array. If none set, take default. */ 994 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) 995 gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; 996 if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) { 997 synctype[nsynctypes++] = RTWS_COND_GET; 998 pr_info("%s: Testing conditional GPs.\n", __func__); 999 } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) { 1000 pr_alert("%s: gp_cond without primitives.\n", __func__); 1001 } 1002 if (gp_exp1 && cur_ops->exp_sync) { 1003 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1004 pr_info("%s: Testing expedited GPs.\n", __func__); 1005 } else if (gp_exp && !cur_ops->exp_sync) { 1006 pr_alert("%s: gp_exp without primitives.\n", __func__); 1007 } 1008 if (gp_normal1 && cur_ops->deferred_free) { 1009 synctype[nsynctypes++] = RTWS_DEF_FREE; 1010 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1011 } else if (gp_normal && !cur_ops->deferred_free) { 1012 pr_alert("%s: gp_normal without primitives.\n", __func__); 1013 } 1014 if (gp_sync1 && cur_ops->sync) { 1015 synctype[nsynctypes++] = RTWS_SYNC; 1016 pr_info("%s: Testing normal GPs.\n", __func__); 1017 } else if (gp_sync && !cur_ops->sync) { 1018 pr_alert("%s: gp_sync without primitives.\n", __func__); 1019 } 1020 if (WARN_ONCE(nsynctypes == 0, 1021 "rcu_torture_writer: No update-side primitives.\n")) { 1022 /* 1023 * No updates primitives, so don't try updating. 1024 * The resulting test won't be testing much, hence the 1025 * above WARN_ONCE(). 1026 */ 1027 rcu_torture_writer_state = RTWS_STOPPING; 1028 torture_kthread_stopping("rcu_torture_writer"); 1029 } 1030 1031 do { 1032 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1033 schedule_timeout_uninterruptible(1); 1034 rp = rcu_torture_alloc(); 1035 if (rp == NULL) 1036 continue; 1037 rp->rtort_pipe_count = 0; 1038 rcu_torture_writer_state = RTWS_DELAY; 1039 udelay(torture_random(&rand) & 0x3ff); 1040 rcu_torture_writer_state = RTWS_REPLACE; 1041 old_rp = rcu_dereference_check(rcu_torture_current, 1042 current == writer_task); 1043 rp->rtort_mbtest = 1; 1044 rcu_assign_pointer(rcu_torture_current, rp); 1045 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1046 if (old_rp) { 1047 i = old_rp->rtort_pipe_count; 1048 if (i > RCU_TORTURE_PIPE_LEN) 1049 i = RCU_TORTURE_PIPE_LEN; 1050 atomic_inc(&rcu_torture_wcount[i]); 1051 WRITE_ONCE(old_rp->rtort_pipe_count, 1052 old_rp->rtort_pipe_count + 1); 1053 switch (synctype[torture_random(&rand) % nsynctypes]) { 1054 case RTWS_DEF_FREE: 1055 rcu_torture_writer_state = RTWS_DEF_FREE; 1056 cur_ops->deferred_free(old_rp); 1057 break; 1058 case RTWS_EXP_SYNC: 1059 rcu_torture_writer_state = RTWS_EXP_SYNC; 1060 cur_ops->exp_sync(); 1061 rcu_torture_pipe_update(old_rp); 1062 break; 1063 case RTWS_COND_GET: 1064 rcu_torture_writer_state = RTWS_COND_GET; 1065 gp_snap = cur_ops->get_state(); 1066 i = torture_random(&rand) % 16; 1067 if (i != 0) 1068 schedule_timeout_interruptible(i); 1069 udelay(torture_random(&rand) % 1000); 1070 rcu_torture_writer_state = RTWS_COND_SYNC; 1071 cur_ops->cond_sync(gp_snap); 1072 rcu_torture_pipe_update(old_rp); 1073 break; 1074 case RTWS_SYNC: 1075 rcu_torture_writer_state = RTWS_SYNC; 1076 cur_ops->sync(); 1077 rcu_torture_pipe_update(old_rp); 1078 break; 1079 default: 1080 WARN_ON_ONCE(1); 1081 break; 1082 } 1083 } 1084 WRITE_ONCE(rcu_torture_current_version, 1085 rcu_torture_current_version + 1); 1086 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1087 if (can_expedite && 1088 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1089 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1090 if (expediting >= 0) 1091 rcu_expedite_gp(); 1092 else 1093 rcu_unexpedite_gp(); 1094 if (++expediting > 3) 1095 expediting = -expediting; 1096 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1097 can_expedite = !rcu_gp_is_expedited() && 1098 !rcu_gp_is_normal(); 1099 } 1100 rcu_torture_writer_state = RTWS_STUTTER; 1101 if (stutter_wait("rcu_torture_writer") && 1102 !READ_ONCE(rcu_fwd_cb_nodelay) && 1103 !cur_ops->slow_gps && 1104 !torture_must_stop() && 1105 rcu_inkernel_boot_has_ended()) 1106 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1107 if (list_empty(&rcu_tortures[i].rtort_free) && 1108 rcu_access_pointer(rcu_torture_current) != 1109 &rcu_tortures[i]) { 1110 rcu_ftrace_dump(DUMP_ALL); 1111 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1112 } 1113 } while (!torture_must_stop()); 1114 /* Reset expediting back to unexpedited. */ 1115 if (expediting > 0) 1116 expediting = -expediting; 1117 while (can_expedite && expediting++ < 0) 1118 rcu_unexpedite_gp(); 1119 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1120 if (!can_expedite) 1121 pr_alert("%s" TORTURE_FLAG 1122 " Dynamic grace-period expediting was disabled.\n", 1123 torture_type); 1124 rcu_torture_writer_state = RTWS_STOPPING; 1125 torture_kthread_stopping("rcu_torture_writer"); 1126 return 0; 1127 } 1128 1129 /* 1130 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1131 * delay between calls. 1132 */ 1133 static int 1134 rcu_torture_fakewriter(void *arg) 1135 { 1136 DEFINE_TORTURE_RANDOM(rand); 1137 1138 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1139 set_user_nice(current, MAX_NICE); 1140 1141 do { 1142 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); 1143 udelay(torture_random(&rand) & 0x3ff); 1144 if (cur_ops->cb_barrier != NULL && 1145 torture_random(&rand) % (nfakewriters * 8) == 0) { 1146 cur_ops->cb_barrier(); 1147 } else if (gp_normal == gp_exp) { 1148 if (cur_ops->sync && torture_random(&rand) & 0x80) 1149 cur_ops->sync(); 1150 else if (cur_ops->exp_sync) 1151 cur_ops->exp_sync(); 1152 } else if (gp_normal && cur_ops->sync) { 1153 cur_ops->sync(); 1154 } else if (cur_ops->exp_sync) { 1155 cur_ops->exp_sync(); 1156 } 1157 stutter_wait("rcu_torture_fakewriter"); 1158 } while (!torture_must_stop()); 1159 1160 torture_kthread_stopping("rcu_torture_fakewriter"); 1161 return 0; 1162 } 1163 1164 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1165 { 1166 kfree(rhp); 1167 } 1168 1169 /* 1170 * Do one extension of an RCU read-side critical section using the 1171 * current reader state in readstate (set to zero for initial entry 1172 * to extended critical section), set the new state as specified by 1173 * newstate (set to zero for final exit from extended critical section), 1174 * and random-number-generator state in trsp. If this is neither the 1175 * beginning or end of the critical section and if there was actually a 1176 * change, do a ->read_delay(). 1177 */ 1178 static void rcutorture_one_extend(int *readstate, int newstate, 1179 struct torture_random_state *trsp, 1180 struct rt_read_seg *rtrsp) 1181 { 1182 unsigned long flags; 1183 int idxnew = -1; 1184 int idxold = *readstate; 1185 int statesnew = ~*readstate & newstate; 1186 int statesold = *readstate & ~newstate; 1187 1188 WARN_ON_ONCE(idxold < 0); 1189 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); 1190 rtrsp->rt_readstate = newstate; 1191 1192 /* First, put new protection in place to avoid critical-section gap. */ 1193 if (statesnew & RCUTORTURE_RDR_BH) 1194 local_bh_disable(); 1195 if (statesnew & RCUTORTURE_RDR_IRQ) 1196 local_irq_disable(); 1197 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1198 preempt_disable(); 1199 if (statesnew & RCUTORTURE_RDR_RBH) 1200 rcu_read_lock_bh(); 1201 if (statesnew & RCUTORTURE_RDR_SCHED) 1202 rcu_read_lock_sched(); 1203 if (statesnew & RCUTORTURE_RDR_RCU) 1204 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; 1205 1206 /* Next, remove old protection, irq first due to bh conflict. */ 1207 if (statesold & RCUTORTURE_RDR_IRQ) 1208 local_irq_enable(); 1209 if (statesold & RCUTORTURE_RDR_BH) 1210 local_bh_enable(); 1211 if (statesold & RCUTORTURE_RDR_PREEMPT) 1212 preempt_enable(); 1213 if (statesold & RCUTORTURE_RDR_RBH) 1214 rcu_read_unlock_bh(); 1215 if (statesold & RCUTORTURE_RDR_SCHED) 1216 rcu_read_unlock_sched(); 1217 if (statesold & RCUTORTURE_RDR_RCU) { 1218 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); 1219 1220 if (lockit) 1221 raw_spin_lock_irqsave(¤t->pi_lock, flags); 1222 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); 1223 if (lockit) 1224 raw_spin_unlock_irqrestore(¤t->pi_lock, flags); 1225 } 1226 1227 /* Delay if neither beginning nor end and there was a change. */ 1228 if ((statesnew || statesold) && *readstate && newstate) 1229 cur_ops->read_delay(trsp, rtrsp); 1230 1231 /* Update the reader state. */ 1232 if (idxnew == -1) 1233 idxnew = idxold & ~RCUTORTURE_RDR_MASK; 1234 WARN_ON_ONCE(idxnew < 0); 1235 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); 1236 *readstate = idxnew | newstate; 1237 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); 1238 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); 1239 } 1240 1241 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1242 static int rcutorture_extend_mask_max(void) 1243 { 1244 int mask; 1245 1246 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1247 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1248 mask = mask | RCUTORTURE_RDR_RCU; 1249 return mask; 1250 } 1251 1252 /* Return a random protection state mask, but with at least one bit set. */ 1253 static int 1254 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1255 { 1256 int mask = rcutorture_extend_mask_max(); 1257 unsigned long randmask1 = torture_random(trsp) >> 8; 1258 unsigned long randmask2 = randmask1 >> 3; 1259 1260 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); 1261 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1262 if (!(randmask1 & 0x7)) 1263 mask = mask & randmask2; 1264 else 1265 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1266 /* Can't enable bh w/irq disabled. */ 1267 if ((mask & RCUTORTURE_RDR_IRQ) && 1268 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || 1269 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) 1270 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1271 return mask ?: RCUTORTURE_RDR_RCU; 1272 } 1273 1274 /* 1275 * Do a randomly selected number of extensions of an existing RCU read-side 1276 * critical section. 1277 */ 1278 static struct rt_read_seg * 1279 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1280 struct rt_read_seg *rtrsp) 1281 { 1282 int i; 1283 int j; 1284 int mask = rcutorture_extend_mask_max(); 1285 1286 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1287 if (!((mask - 1) & mask)) 1288 return rtrsp; /* Current RCU reader not extendable. */ 1289 /* Bias towards larger numbers of loops. */ 1290 i = (torture_random(trsp) >> 3); 1291 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1292 for (j = 0; j < i; j++) { 1293 mask = rcutorture_extend_mask(*readstate, trsp); 1294 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1295 } 1296 return &rtrsp[j]; 1297 } 1298 1299 /* 1300 * Do one read-side critical section, returning false if there was 1301 * no data to read. Can be invoked both from process context and 1302 * from a timer handler. 1303 */ 1304 static bool rcu_torture_one_read(struct torture_random_state *trsp) 1305 { 1306 int i; 1307 unsigned long started; 1308 unsigned long completed; 1309 int newstate; 1310 struct rcu_torture *p; 1311 int pipe_count; 1312 int readstate = 0; 1313 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1314 struct rt_read_seg *rtrsp = &rtseg[0]; 1315 struct rt_read_seg *rtrsp1; 1316 unsigned long long ts; 1317 1318 newstate = rcutorture_extend_mask(readstate, trsp); 1319 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1320 started = cur_ops->get_gp_seq(); 1321 ts = rcu_trace_clock_local(); 1322 p = rcu_dereference_check(rcu_torture_current, 1323 rcu_read_lock_bh_held() || 1324 rcu_read_lock_sched_held() || 1325 srcu_read_lock_held(srcu_ctlp) || 1326 torturing_tasks()); 1327 if (p == NULL) { 1328 /* Wait for rcu_torture_writer to get underway */ 1329 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1330 return false; 1331 } 1332 if (p->rtort_mbtest == 0) 1333 atomic_inc(&n_rcu_torture_mberror); 1334 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1335 preempt_disable(); 1336 pipe_count = READ_ONCE(p->rtort_pipe_count); 1337 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1338 /* Should not happen, but... */ 1339 pipe_count = RCU_TORTURE_PIPE_LEN; 1340 } 1341 completed = cur_ops->get_gp_seq(); 1342 if (pipe_count > 1) { 1343 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1344 ts, started, completed); 1345 rcu_ftrace_dump(DUMP_ALL); 1346 } 1347 __this_cpu_inc(rcu_torture_count[pipe_count]); 1348 completed = rcutorture_seq_diff(completed, started); 1349 if (completed > RCU_TORTURE_PIPE_LEN) { 1350 /* Should not happen, but... */ 1351 completed = RCU_TORTURE_PIPE_LEN; 1352 } 1353 __this_cpu_inc(rcu_torture_batch[completed]); 1354 preempt_enable(); 1355 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1356 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); 1357 1358 /* If error or close call, record the sequence of reader protections. */ 1359 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1360 i = 0; 1361 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1362 err_segs[i++] = *rtrsp1; 1363 rt_read_nsegs = i; 1364 } 1365 1366 return true; 1367 } 1368 1369 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 1370 1371 /* 1372 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1373 * incrementing the corresponding element of the pipeline array. The 1374 * counter in the element should never be greater than 1, otherwise, the 1375 * RCU implementation is broken. 1376 */ 1377 static void rcu_torture_timer(struct timer_list *unused) 1378 { 1379 atomic_long_inc(&n_rcu_torture_timers); 1380 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand)); 1381 1382 /* Test call_rcu() invocation from interrupt handler. */ 1383 if (cur_ops->call) { 1384 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 1385 1386 if (rhp) 1387 cur_ops->call(rhp, rcu_torture_timer_cb); 1388 } 1389 } 1390 1391 /* 1392 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 1393 * incrementing the corresponding element of the pipeline array. The 1394 * counter in the element should never be greater than 1, otherwise, the 1395 * RCU implementation is broken. 1396 */ 1397 static int 1398 rcu_torture_reader(void *arg) 1399 { 1400 unsigned long lastsleep = jiffies; 1401 long myid = (long)arg; 1402 int mynumonline = myid; 1403 DEFINE_TORTURE_RANDOM(rand); 1404 struct timer_list t; 1405 1406 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1407 set_user_nice(current, MAX_NICE); 1408 if (irqreader && cur_ops->irq_capable) 1409 timer_setup_on_stack(&t, rcu_torture_timer, 0); 1410 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1411 do { 1412 if (irqreader && cur_ops->irq_capable) { 1413 if (!timer_pending(&t)) 1414 mod_timer(&t, jiffies + 1); 1415 } 1416 if (!rcu_torture_one_read(&rand) && !torture_must_stop()) 1417 schedule_timeout_interruptible(HZ); 1418 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 1419 schedule_timeout_interruptible(1); 1420 lastsleep = jiffies + 10; 1421 } 1422 while (num_online_cpus() < mynumonline && !torture_must_stop()) 1423 schedule_timeout_interruptible(HZ / 5); 1424 stutter_wait("rcu_torture_reader"); 1425 } while (!torture_must_stop()); 1426 if (irqreader && cur_ops->irq_capable) { 1427 del_timer_sync(&t); 1428 destroy_timer_on_stack(&t); 1429 } 1430 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1431 torture_kthread_stopping("rcu_torture_reader"); 1432 return 0; 1433 } 1434 1435 /* 1436 * Print torture statistics. Caller must ensure that there is only 1437 * one call to this function at a given time!!! This is normally 1438 * accomplished by relying on the module system to only have one copy 1439 * of the module loaded, and then by giving the rcu_torture_stats 1440 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1441 * thread is not running). 1442 */ 1443 static void 1444 rcu_torture_stats_print(void) 1445 { 1446 int cpu; 1447 int i; 1448 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1449 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1450 struct rcu_torture *rtcp; 1451 static unsigned long rtcv_snap = ULONG_MAX; 1452 static bool splatted; 1453 struct task_struct *wtp; 1454 1455 for_each_possible_cpu(cpu) { 1456 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1457 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 1458 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 1459 } 1460 } 1461 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1462 if (pipesummary[i] != 0) 1463 break; 1464 } 1465 1466 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1467 rtcp = rcu_access_pointer(rcu_torture_current); 1468 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1469 rtcp, 1470 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 1471 rcu_torture_current_version, 1472 list_empty(&rcu_torture_freelist), 1473 atomic_read(&n_rcu_torture_alloc), 1474 atomic_read(&n_rcu_torture_alloc_fail), 1475 atomic_read(&n_rcu_torture_free)); 1476 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ", 1477 atomic_read(&n_rcu_torture_mberror), 1478 n_rcu_torture_barrier_error, 1479 n_rcu_torture_boost_ktrerror, 1480 n_rcu_torture_boost_rterror); 1481 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1482 n_rcu_torture_boost_failure, 1483 n_rcu_torture_boosts, 1484 atomic_long_read(&n_rcu_torture_timers)); 1485 torture_onoff_stats(); 1486 pr_cont("barrier: %ld/%ld:%ld\n", 1487 n_barrier_successes, 1488 n_barrier_attempts, 1489 n_rcu_torture_barrier_error); 1490 1491 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1492 if (atomic_read(&n_rcu_torture_mberror) || 1493 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1494 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1495 i > 1) { 1496 pr_cont("%s", "!!! "); 1497 atomic_inc(&n_rcu_torture_error); 1498 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1499 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1500 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1501 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1502 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed 1503 WARN_ON_ONCE(i > 1); // Too-short grace period 1504 } 1505 pr_cont("Reader Pipe: "); 1506 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1507 pr_cont(" %ld", pipesummary[i]); 1508 pr_cont("\n"); 1509 1510 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1511 pr_cont("Reader Batch: "); 1512 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1513 pr_cont(" %ld", batchsummary[i]); 1514 pr_cont("\n"); 1515 1516 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1517 pr_cont("Free-Block Circulation: "); 1518 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1519 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1520 } 1521 pr_cont("\n"); 1522 1523 if (cur_ops->stats) 1524 cur_ops->stats(); 1525 if (rtcv_snap == rcu_torture_current_version && 1526 rcu_access_pointer(rcu_torture_current) && 1527 !rcu_stall_is_suppressed()) { 1528 int __maybe_unused flags = 0; 1529 unsigned long __maybe_unused gp_seq = 0; 1530 1531 rcutorture_get_gp_data(cur_ops->ttype, 1532 &flags, &gp_seq); 1533 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1534 &flags, &gp_seq); 1535 wtp = READ_ONCE(writer_task); 1536 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1537 rcu_torture_writer_state_getname(), 1538 rcu_torture_writer_state, gp_seq, flags, 1539 wtp == NULL ? ~0UL : wtp->state, 1540 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1541 if (!splatted && wtp) { 1542 sched_show_task(wtp); 1543 splatted = true; 1544 } 1545 show_rcu_gp_kthreads(); 1546 rcu_ftrace_dump(DUMP_ALL); 1547 } 1548 rtcv_snap = rcu_torture_current_version; 1549 } 1550 1551 /* 1552 * Periodically prints torture statistics, if periodic statistics printing 1553 * was specified via the stat_interval module parameter. 1554 */ 1555 static int 1556 rcu_torture_stats(void *arg) 1557 { 1558 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1559 do { 1560 schedule_timeout_interruptible(stat_interval * HZ); 1561 rcu_torture_stats_print(); 1562 torture_shutdown_absorb("rcu_torture_stats"); 1563 } while (!torture_must_stop()); 1564 torture_kthread_stopping("rcu_torture_stats"); 1565 return 0; 1566 } 1567 1568 static void 1569 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1570 { 1571 pr_alert("%s" TORTURE_FLAG 1572 "--- %s: nreaders=%d nfakewriters=%d " 1573 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1574 "shuffle_interval=%d stutter=%d irqreader=%d " 1575 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1576 "test_boost=%d/%d test_boost_interval=%d " 1577 "test_boost_duration=%d shutdown_secs=%d " 1578 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1579 "n_barrier_cbs=%d " 1580 "onoff_interval=%d onoff_holdoff=%d\n", 1581 torture_type, tag, nrealreaders, nfakewriters, 1582 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1583 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1584 test_boost, cur_ops->can_boost, 1585 test_boost_interval, test_boost_duration, shutdown_secs, 1586 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1587 n_barrier_cbs, 1588 onoff_interval, onoff_holdoff); 1589 } 1590 1591 static int rcutorture_booster_cleanup(unsigned int cpu) 1592 { 1593 struct task_struct *t; 1594 1595 if (boost_tasks[cpu] == NULL) 1596 return 0; 1597 mutex_lock(&boost_mutex); 1598 t = boost_tasks[cpu]; 1599 boost_tasks[cpu] = NULL; 1600 rcu_torture_enable_rt_throttle(); 1601 mutex_unlock(&boost_mutex); 1602 1603 /* This must be outside of the mutex, otherwise deadlock! */ 1604 torture_stop_kthread(rcu_torture_boost, t); 1605 return 0; 1606 } 1607 1608 static int rcutorture_booster_init(unsigned int cpu) 1609 { 1610 int retval; 1611 1612 if (boost_tasks[cpu] != NULL) 1613 return 0; /* Already created, nothing more to do. */ 1614 1615 /* Don't allow time recalculation while creating a new task. */ 1616 mutex_lock(&boost_mutex); 1617 rcu_torture_disable_rt_throttle(); 1618 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1619 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1620 cpu_to_node(cpu), 1621 "rcu_torture_boost"); 1622 if (IS_ERR(boost_tasks[cpu])) { 1623 retval = PTR_ERR(boost_tasks[cpu]); 1624 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 1625 n_rcu_torture_boost_ktrerror++; 1626 boost_tasks[cpu] = NULL; 1627 mutex_unlock(&boost_mutex); 1628 return retval; 1629 } 1630 kthread_bind(boost_tasks[cpu], cpu); 1631 wake_up_process(boost_tasks[cpu]); 1632 mutex_unlock(&boost_mutex); 1633 return 0; 1634 } 1635 1636 /* 1637 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1638 * induces a CPU stall for the time specified by stall_cpu. 1639 */ 1640 static int rcu_torture_stall(void *args) 1641 { 1642 unsigned long stop_at; 1643 1644 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1645 if (stall_cpu_holdoff > 0) { 1646 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1647 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1648 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1649 } 1650 if (!kthread_should_stop()) { 1651 stop_at = ktime_get_seconds() + stall_cpu; 1652 /* RCU CPU stall is expected behavior in following code. */ 1653 rcu_read_lock(); 1654 if (stall_cpu_irqsoff) 1655 local_irq_disable(); 1656 else 1657 preempt_disable(); 1658 pr_alert("rcu_torture_stall start on CPU %d.\n", 1659 smp_processor_id()); 1660 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 1661 stop_at)) 1662 continue; /* Induce RCU CPU stall warning. */ 1663 if (stall_cpu_irqsoff) 1664 local_irq_enable(); 1665 else 1666 preempt_enable(); 1667 rcu_read_unlock(); 1668 pr_alert("rcu_torture_stall end.\n"); 1669 } 1670 torture_shutdown_absorb("rcu_torture_stall"); 1671 while (!kthread_should_stop()) 1672 schedule_timeout_interruptible(10 * HZ); 1673 return 0; 1674 } 1675 1676 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1677 static int __init rcu_torture_stall_init(void) 1678 { 1679 if (stall_cpu <= 0) 1680 return 0; 1681 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 1682 } 1683 1684 /* State structure for forward-progress self-propagating RCU callback. */ 1685 struct fwd_cb_state { 1686 struct rcu_head rh; 1687 int stop; 1688 }; 1689 1690 /* 1691 * Forward-progress self-propagating RCU callback function. Because 1692 * callbacks run from softirq, this function is an implicit RCU read-side 1693 * critical section. 1694 */ 1695 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 1696 { 1697 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 1698 1699 if (READ_ONCE(fcsp->stop)) { 1700 WRITE_ONCE(fcsp->stop, 2); 1701 return; 1702 } 1703 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 1704 } 1705 1706 /* State for continuous-flood RCU callbacks. */ 1707 struct rcu_fwd_cb { 1708 struct rcu_head rh; 1709 struct rcu_fwd_cb *rfc_next; 1710 struct rcu_fwd *rfc_rfp; 1711 int rfc_gps; 1712 }; 1713 1714 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 1715 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1716 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1717 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 1718 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 1719 1720 struct rcu_launder_hist { 1721 long n_launders; 1722 unsigned long launder_gp_seq; 1723 }; 1724 1725 struct rcu_fwd { 1726 spinlock_t rcu_fwd_lock; 1727 struct rcu_fwd_cb *rcu_fwd_cb_head; 1728 struct rcu_fwd_cb **rcu_fwd_cb_tail; 1729 long n_launders_cb; 1730 unsigned long rcu_fwd_startat; 1731 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 1732 unsigned long rcu_launder_gp_seq_start; 1733 }; 1734 1735 struct rcu_fwd *rcu_fwds; 1736 bool rcu_fwd_emergency_stop; 1737 1738 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 1739 { 1740 unsigned long gps; 1741 unsigned long gps_old; 1742 int i; 1743 int j; 1744 1745 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 1746 if (rfp->n_launders_hist[i].n_launders > 0) 1747 break; 1748 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 1749 __func__, jiffies - rfp->rcu_fwd_startat); 1750 gps_old = rfp->rcu_launder_gp_seq_start; 1751 for (j = 0; j <= i; j++) { 1752 gps = rfp->n_launders_hist[j].launder_gp_seq; 1753 pr_cont(" %ds/%d: %ld:%ld", 1754 j + 1, FWD_CBS_HIST_DIV, 1755 rfp->n_launders_hist[j].n_launders, 1756 rcutorture_seq_diff(gps, gps_old)); 1757 gps_old = gps; 1758 } 1759 pr_cont("\n"); 1760 } 1761 1762 /* Callback function for continuous-flood RCU callbacks. */ 1763 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 1764 { 1765 unsigned long flags; 1766 int i; 1767 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 1768 struct rcu_fwd_cb **rfcpp; 1769 struct rcu_fwd *rfp = rfcp->rfc_rfp; 1770 1771 rfcp->rfc_next = NULL; 1772 rfcp->rfc_gps++; 1773 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1774 rfcpp = rfp->rcu_fwd_cb_tail; 1775 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 1776 WRITE_ONCE(*rfcpp, rfcp); 1777 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 1778 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 1779 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 1780 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 1781 rfp->n_launders_hist[i].n_launders++; 1782 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 1783 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1784 } 1785 1786 // Give the scheduler a chance, even on nohz_full CPUs. 1787 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 1788 { 1789 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 1790 // Real call_rcu() floods hit userspace, so emulate that. 1791 if (need_resched() || (iter & 0xfff)) 1792 schedule(); 1793 return; 1794 } 1795 // No userspace emulation: CB invocation throttles call_rcu() 1796 cond_resched(); 1797 } 1798 1799 /* 1800 * Free all callbacks on the rcu_fwd_cb_head list, either because the 1801 * test is over or because we hit an OOM event. 1802 */ 1803 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 1804 { 1805 unsigned long flags; 1806 unsigned long freed = 0; 1807 struct rcu_fwd_cb *rfcp; 1808 1809 for (;;) { 1810 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1811 rfcp = rfp->rcu_fwd_cb_head; 1812 if (!rfcp) { 1813 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1814 break; 1815 } 1816 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 1817 if (!rfp->rcu_fwd_cb_head) 1818 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 1819 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1820 kfree(rfcp); 1821 freed++; 1822 rcu_torture_fwd_prog_cond_resched(freed); 1823 if (tick_nohz_full_enabled()) { 1824 local_irq_save(flags); 1825 rcu_momentary_dyntick_idle(); 1826 local_irq_restore(flags); 1827 } 1828 } 1829 return freed; 1830 } 1831 1832 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1833 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 1834 int *tested, int *tested_tries) 1835 { 1836 unsigned long cver; 1837 unsigned long dur; 1838 struct fwd_cb_state fcs; 1839 unsigned long gps; 1840 int idx; 1841 int sd; 1842 int sd4; 1843 bool selfpropcb = false; 1844 unsigned long stopat; 1845 static DEFINE_TORTURE_RANDOM(trs); 1846 1847 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1848 init_rcu_head_on_stack(&fcs.rh); 1849 selfpropcb = true; 1850 } 1851 1852 /* Tight loop containing cond_resched(). */ 1853 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1854 cur_ops->sync(); /* Later readers see above write. */ 1855 if (selfpropcb) { 1856 WRITE_ONCE(fcs.stop, 0); 1857 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1858 } 1859 cver = READ_ONCE(rcu_torture_current_version); 1860 gps = cur_ops->get_gp_seq(); 1861 sd = cur_ops->stall_dur() + 1; 1862 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1863 dur = sd4 + torture_random(&trs) % (sd - sd4); 1864 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1865 stopat = rfp->rcu_fwd_startat + dur; 1866 while (time_before(jiffies, stopat) && 1867 !shutdown_time_arrived() && 1868 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1869 idx = cur_ops->readlock(); 1870 udelay(10); 1871 cur_ops->readunlock(idx); 1872 if (!fwd_progress_need_resched || need_resched()) 1873 cond_resched(); 1874 } 1875 (*tested_tries)++; 1876 if (!time_before(jiffies, stopat) && 1877 !shutdown_time_arrived() && 1878 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1879 (*tested)++; 1880 cver = READ_ONCE(rcu_torture_current_version) - cver; 1881 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1882 WARN_ON(!cver && gps < 2); 1883 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1884 } 1885 if (selfpropcb) { 1886 WRITE_ONCE(fcs.stop, 1); 1887 cur_ops->sync(); /* Wait for running CB to complete. */ 1888 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1889 } 1890 1891 if (selfpropcb) { 1892 WARN_ON(READ_ONCE(fcs.stop) != 2); 1893 destroy_rcu_head_on_stack(&fcs.rh); 1894 } 1895 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1896 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1897 } 1898 1899 /* Carry out call_rcu() forward-progress testing. */ 1900 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 1901 { 1902 unsigned long cver; 1903 unsigned long flags; 1904 unsigned long gps; 1905 int i; 1906 long n_launders; 1907 long n_launders_cb_snap; 1908 long n_launders_sa; 1909 long n_max_cbs; 1910 long n_max_gps; 1911 struct rcu_fwd_cb *rfcp; 1912 struct rcu_fwd_cb *rfcpn; 1913 unsigned long stopat; 1914 unsigned long stoppedat; 1915 1916 if (READ_ONCE(rcu_fwd_emergency_stop)) 1917 return; /* Get out of the way quickly, no GP wait! */ 1918 if (!cur_ops->call) 1919 return; /* Can't do call_rcu() fwd prog without ->call. */ 1920 1921 /* Loop continuously posting RCU callbacks. */ 1922 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1923 cur_ops->sync(); /* Later readers see above write. */ 1924 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1925 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 1926 n_launders = 0; 1927 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 1928 n_launders_sa = 0; 1929 n_max_cbs = 0; 1930 n_max_gps = 0; 1931 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 1932 rfp->n_launders_hist[i].n_launders = 0; 1933 cver = READ_ONCE(rcu_torture_current_version); 1934 gps = cur_ops->get_gp_seq(); 1935 rfp->rcu_launder_gp_seq_start = gps; 1936 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1937 while (time_before(jiffies, stopat) && 1938 !shutdown_time_arrived() && 1939 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1940 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 1941 rfcpn = NULL; 1942 if (rfcp) 1943 rfcpn = READ_ONCE(rfcp->rfc_next); 1944 if (rfcpn) { 1945 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 1946 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 1947 break; 1948 rfp->rcu_fwd_cb_head = rfcpn; 1949 n_launders++; 1950 n_launders_sa++; 1951 } else { 1952 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 1953 if (WARN_ON_ONCE(!rfcp)) { 1954 schedule_timeout_interruptible(1); 1955 continue; 1956 } 1957 n_max_cbs++; 1958 n_launders_sa = 0; 1959 rfcp->rfc_gps = 0; 1960 rfcp->rfc_rfp = rfp; 1961 } 1962 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 1963 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 1964 if (tick_nohz_full_enabled()) { 1965 local_irq_save(flags); 1966 rcu_momentary_dyntick_idle(); 1967 local_irq_restore(flags); 1968 } 1969 } 1970 stoppedat = jiffies; 1971 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 1972 cver = READ_ONCE(rcu_torture_current_version) - cver; 1973 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1974 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 1975 (void)rcu_torture_fwd_prog_cbfree(rfp); 1976 1977 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 1978 !shutdown_time_arrived()) { 1979 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 1980 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 1981 __func__, 1982 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 1983 n_launders + n_max_cbs - n_launders_cb_snap, 1984 n_launders, n_launders_sa, 1985 n_max_gps, n_max_cbs, cver, gps); 1986 rcu_torture_fwd_cb_hist(rfp); 1987 } 1988 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 1989 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1990 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1991 } 1992 1993 1994 /* 1995 * OOM notifier, but this only prints diagnostic information for the 1996 * current forward-progress test. 1997 */ 1998 static int rcutorture_oom_notify(struct notifier_block *self, 1999 unsigned long notused, void *nfreed) 2000 { 2001 struct rcu_fwd *rfp = rcu_fwds; 2002 2003 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2004 __func__); 2005 rcu_torture_fwd_cb_hist(rfp); 2006 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2007 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2008 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2009 pr_info("%s: Freed %lu RCU callbacks.\n", 2010 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2011 rcu_barrier(); 2012 pr_info("%s: Freed %lu RCU callbacks.\n", 2013 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2014 rcu_barrier(); 2015 pr_info("%s: Freed %lu RCU callbacks.\n", 2016 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2017 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2018 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2019 pr_info("%s returning after OOM processing.\n", __func__); 2020 return NOTIFY_OK; 2021 } 2022 2023 static struct notifier_block rcutorture_oom_nb = { 2024 .notifier_call = rcutorture_oom_notify 2025 }; 2026 2027 /* Carry out grace-period forward-progress testing. */ 2028 static int rcu_torture_fwd_prog(void *args) 2029 { 2030 struct rcu_fwd *rfp = args; 2031 int tested = 0; 2032 int tested_tries = 0; 2033 2034 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2035 rcu_bind_current_to_nocb(); 2036 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2037 set_user_nice(current, MAX_NICE); 2038 do { 2039 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2040 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2041 register_oom_notifier(&rcutorture_oom_nb); 2042 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2043 rcu_inkernel_boot_has_ended()) 2044 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2045 if (rcu_inkernel_boot_has_ended()) 2046 rcu_torture_fwd_prog_cr(rfp); 2047 unregister_oom_notifier(&rcutorture_oom_nb); 2048 2049 /* Avoid slow periods, better to test when busy. */ 2050 stutter_wait("rcu_torture_fwd_prog"); 2051 } while (!torture_must_stop()); 2052 /* Short runs might not contain a valid forward-progress attempt. */ 2053 WARN_ON(!tested && tested_tries >= 5); 2054 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2055 torture_kthread_stopping("rcu_torture_fwd_prog"); 2056 return 0; 2057 } 2058 2059 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2060 static int __init rcu_torture_fwd_prog_init(void) 2061 { 2062 struct rcu_fwd *rfp; 2063 2064 if (!fwd_progress) 2065 return 0; /* Not requested, so don't do it. */ 2066 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 2067 cur_ops == &rcu_busted_ops) { 2068 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2069 return 0; 2070 } 2071 if (stall_cpu > 0) { 2072 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2073 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) 2074 return -EINVAL; /* In module, can fail back to user. */ 2075 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 2076 return 0; 2077 } 2078 if (fwd_progress_holdoff <= 0) 2079 fwd_progress_holdoff = 1; 2080 if (fwd_progress_div <= 0) 2081 fwd_progress_div = 4; 2082 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2083 if (!rfp) 2084 return -ENOMEM; 2085 spin_lock_init(&rfp->rcu_fwd_lock); 2086 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2087 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2088 } 2089 2090 /* Callback function for RCU barrier testing. */ 2091 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2092 { 2093 atomic_inc(&barrier_cbs_invoked); 2094 } 2095 2096 /* IPI handler to get callback posted on desired CPU, if online. */ 2097 static void rcu_torture_barrier1cb(void *rcu_void) 2098 { 2099 struct rcu_head *rhp = rcu_void; 2100 2101 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2102 } 2103 2104 /* kthread function to register callbacks used to test RCU barriers. */ 2105 static int rcu_torture_barrier_cbs(void *arg) 2106 { 2107 long myid = (long)arg; 2108 bool lastphase = 0; 2109 bool newphase; 2110 struct rcu_head rcu; 2111 2112 init_rcu_head_on_stack(&rcu); 2113 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2114 set_user_nice(current, MAX_NICE); 2115 do { 2116 wait_event(barrier_cbs_wq[myid], 2117 (newphase = 2118 smp_load_acquire(&barrier_phase)) != lastphase || 2119 torture_must_stop()); 2120 lastphase = newphase; 2121 if (torture_must_stop()) 2122 break; 2123 /* 2124 * The above smp_load_acquire() ensures barrier_phase load 2125 * is ordered before the following ->call(). 2126 */ 2127 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2128 &rcu, 1)) { 2129 // IPI failed, so use direct call from current CPU. 2130 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2131 } 2132 if (atomic_dec_and_test(&barrier_cbs_count)) 2133 wake_up(&barrier_wq); 2134 } while (!torture_must_stop()); 2135 if (cur_ops->cb_barrier != NULL) 2136 cur_ops->cb_barrier(); 2137 destroy_rcu_head_on_stack(&rcu); 2138 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2139 return 0; 2140 } 2141 2142 /* kthread function to drive and coordinate RCU barrier testing. */ 2143 static int rcu_torture_barrier(void *arg) 2144 { 2145 int i; 2146 2147 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2148 do { 2149 atomic_set(&barrier_cbs_invoked, 0); 2150 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2151 /* Ensure barrier_phase ordered after prior assignments. */ 2152 smp_store_release(&barrier_phase, !barrier_phase); 2153 for (i = 0; i < n_barrier_cbs; i++) 2154 wake_up(&barrier_cbs_wq[i]); 2155 wait_event(barrier_wq, 2156 atomic_read(&barrier_cbs_count) == 0 || 2157 torture_must_stop()); 2158 if (torture_must_stop()) 2159 break; 2160 n_barrier_attempts++; 2161 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2162 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2163 n_rcu_torture_barrier_error++; 2164 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2165 atomic_read(&barrier_cbs_invoked), 2166 n_barrier_cbs); 2167 WARN_ON(1); 2168 // Wait manually for the remaining callbacks 2169 i = 0; 2170 do { 2171 if (WARN_ON(i++ > HZ)) 2172 i = INT_MIN; 2173 schedule_timeout_interruptible(1); 2174 cur_ops->cb_barrier(); 2175 } while (atomic_read(&barrier_cbs_invoked) != 2176 n_barrier_cbs && 2177 !torture_must_stop()); 2178 smp_mb(); // Can't trust ordering if broken. 2179 if (!torture_must_stop()) 2180 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2181 atomic_read(&barrier_cbs_invoked)); 2182 } else { 2183 n_barrier_successes++; 2184 } 2185 schedule_timeout_interruptible(HZ / 10); 2186 } while (!torture_must_stop()); 2187 torture_kthread_stopping("rcu_torture_barrier"); 2188 return 0; 2189 } 2190 2191 /* Initialize RCU barrier testing. */ 2192 static int rcu_torture_barrier_init(void) 2193 { 2194 int i; 2195 int ret; 2196 2197 if (n_barrier_cbs <= 0) 2198 return 0; 2199 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2200 pr_alert("%s" TORTURE_FLAG 2201 " Call or barrier ops missing for %s,\n", 2202 torture_type, cur_ops->name); 2203 pr_alert("%s" TORTURE_FLAG 2204 " RCU barrier testing omitted from run.\n", 2205 torture_type); 2206 return 0; 2207 } 2208 atomic_set(&barrier_cbs_count, 0); 2209 atomic_set(&barrier_cbs_invoked, 0); 2210 barrier_cbs_tasks = 2211 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2212 GFP_KERNEL); 2213 barrier_cbs_wq = 2214 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2215 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2216 return -ENOMEM; 2217 for (i = 0; i < n_barrier_cbs; i++) { 2218 init_waitqueue_head(&barrier_cbs_wq[i]); 2219 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2220 (void *)(long)i, 2221 barrier_cbs_tasks[i]); 2222 if (ret) 2223 return ret; 2224 } 2225 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2226 } 2227 2228 /* Clean up after RCU barrier testing. */ 2229 static void rcu_torture_barrier_cleanup(void) 2230 { 2231 int i; 2232 2233 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2234 if (barrier_cbs_tasks != NULL) { 2235 for (i = 0; i < n_barrier_cbs; i++) 2236 torture_stop_kthread(rcu_torture_barrier_cbs, 2237 barrier_cbs_tasks[i]); 2238 kfree(barrier_cbs_tasks); 2239 barrier_cbs_tasks = NULL; 2240 } 2241 if (barrier_cbs_wq != NULL) { 2242 kfree(barrier_cbs_wq); 2243 barrier_cbs_wq = NULL; 2244 } 2245 } 2246 2247 static bool rcu_torture_can_boost(void) 2248 { 2249 static int boost_warn_once; 2250 int prio; 2251 2252 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2253 return false; 2254 2255 prio = rcu_get_gp_kthreads_prio(); 2256 if (!prio) 2257 return false; 2258 2259 if (prio < 2) { 2260 if (boost_warn_once == 1) 2261 return false; 2262 2263 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2264 boost_warn_once = 1; 2265 return false; 2266 } 2267 2268 return true; 2269 } 2270 2271 static enum cpuhp_state rcutor_hp; 2272 2273 static void 2274 rcu_torture_cleanup(void) 2275 { 2276 int firsttime; 2277 int flags = 0; 2278 unsigned long gp_seq = 0; 2279 int i; 2280 2281 if (torture_cleanup_begin()) { 2282 if (cur_ops->cb_barrier != NULL) 2283 cur_ops->cb_barrier(); 2284 return; 2285 } 2286 if (!cur_ops) { 2287 torture_cleanup_end(); 2288 return; 2289 } 2290 2291 show_rcu_gp_kthreads(); 2292 rcu_torture_barrier_cleanup(); 2293 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2294 torture_stop_kthread(rcu_torture_stall, stall_task); 2295 torture_stop_kthread(rcu_torture_writer, writer_task); 2296 2297 if (reader_tasks) { 2298 for (i = 0; i < nrealreaders; i++) 2299 torture_stop_kthread(rcu_torture_reader, 2300 reader_tasks[i]); 2301 kfree(reader_tasks); 2302 } 2303 rcu_torture_current = NULL; 2304 2305 if (fakewriter_tasks) { 2306 for (i = 0; i < nfakewriters; i++) { 2307 torture_stop_kthread(rcu_torture_fakewriter, 2308 fakewriter_tasks[i]); 2309 } 2310 kfree(fakewriter_tasks); 2311 fakewriter_tasks = NULL; 2312 } 2313 2314 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2315 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2316 pr_alert("%s: End-test grace-period state: g%lu f%#x\n", 2317 cur_ops->name, gp_seq, flags); 2318 torture_stop_kthread(rcu_torture_stats, stats_task); 2319 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2320 if (rcu_torture_can_boost()) 2321 cpuhp_remove_state(rcutor_hp); 2322 2323 /* 2324 * Wait for all RCU callbacks to fire, then do torture-type-specific 2325 * cleanup operations. 2326 */ 2327 if (cur_ops->cb_barrier != NULL) 2328 cur_ops->cb_barrier(); 2329 if (cur_ops->cleanup != NULL) 2330 cur_ops->cleanup(); 2331 2332 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2333 2334 if (err_segs_recorded) { 2335 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2336 if (rt_read_nsegs == 0) 2337 pr_alert("\t: No segments recorded!!!\n"); 2338 firsttime = 1; 2339 for (i = 0; i < rt_read_nsegs; i++) { 2340 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2341 if (err_segs[i].rt_delay_jiffies != 0) { 2342 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2343 err_segs[i].rt_delay_jiffies); 2344 firsttime = 0; 2345 } 2346 if (err_segs[i].rt_delay_ms != 0) { 2347 pr_cont("%s%ldms", firsttime ? "" : "+", 2348 err_segs[i].rt_delay_ms); 2349 firsttime = 0; 2350 } 2351 if (err_segs[i].rt_delay_us != 0) { 2352 pr_cont("%s%ldus", firsttime ? "" : "+", 2353 err_segs[i].rt_delay_us); 2354 firsttime = 0; 2355 } 2356 pr_cont("%s\n", 2357 err_segs[i].rt_preempted ? "preempted" : ""); 2358 2359 } 2360 } 2361 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2362 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2363 else if (torture_onoff_failures()) 2364 rcu_torture_print_module_parms(cur_ops, 2365 "End of test: RCU_HOTPLUG"); 2366 else 2367 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2368 torture_cleanup_end(); 2369 } 2370 2371 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2372 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2373 { 2374 } 2375 2376 static void rcu_torture_err_cb(struct rcu_head *rhp) 2377 { 2378 /* 2379 * This -might- happen due to race conditions, but is unlikely. 2380 * The scenario that leads to this happening is that the 2381 * first of the pair of duplicate callbacks is queued, 2382 * someone else starts a grace period that includes that 2383 * callback, then the second of the pair must wait for the 2384 * next grace period. Unlikely, but can happen. If it 2385 * does happen, the debug-objects subsystem won't have splatted. 2386 */ 2387 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2388 } 2389 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2390 2391 /* 2392 * Verify that double-free causes debug-objects to complain, but only 2393 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2394 * cannot be carried out. 2395 */ 2396 static void rcu_test_debug_objects(void) 2397 { 2398 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2399 struct rcu_head rh1; 2400 struct rcu_head rh2; 2401 2402 init_rcu_head_on_stack(&rh1); 2403 init_rcu_head_on_stack(&rh2); 2404 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2405 2406 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2407 preempt_disable(); /* Prevent preemption from interrupting test. */ 2408 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2409 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2410 local_irq_disable(); /* Make it harder to start a new grace period. */ 2411 call_rcu(&rh2, rcu_torture_leak_cb); 2412 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2413 local_irq_enable(); 2414 rcu_read_unlock(); 2415 preempt_enable(); 2416 2417 /* Wait for them all to get done so we can safely return. */ 2418 rcu_barrier(); 2419 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2420 destroy_rcu_head_on_stack(&rh1); 2421 destroy_rcu_head_on_stack(&rh2); 2422 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2423 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2424 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2425 } 2426 2427 static void rcutorture_sync(void) 2428 { 2429 static unsigned long n; 2430 2431 if (cur_ops->sync && !(++n & 0xfff)) 2432 cur_ops->sync(); 2433 } 2434 2435 static int __init 2436 rcu_torture_init(void) 2437 { 2438 long i; 2439 int cpu; 2440 int firsterr = 0; 2441 static struct rcu_torture_ops *torture_ops[] = { 2442 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2443 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, &trivial_ops, 2444 }; 2445 2446 if (!torture_init_begin(torture_type, verbose)) 2447 return -EBUSY; 2448 2449 /* Process args and tell the world that the torturer is on the job. */ 2450 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2451 cur_ops = torture_ops[i]; 2452 if (strcmp(torture_type, cur_ops->name) == 0) 2453 break; 2454 } 2455 if (i == ARRAY_SIZE(torture_ops)) { 2456 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2457 torture_type); 2458 pr_alert("rcu-torture types:"); 2459 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2460 pr_cont(" %s", torture_ops[i]->name); 2461 pr_cont("\n"); 2462 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2463 firsterr = -EINVAL; 2464 cur_ops = NULL; 2465 goto unwind; 2466 } 2467 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2468 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2469 fqs_duration = 0; 2470 } 2471 if (cur_ops->init) 2472 cur_ops->init(); 2473 2474 if (nreaders >= 0) { 2475 nrealreaders = nreaders; 2476 } else { 2477 nrealreaders = num_online_cpus() - 2 - nreaders; 2478 if (nrealreaders <= 0) 2479 nrealreaders = 1; 2480 } 2481 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2482 2483 /* Set up the freelist. */ 2484 2485 INIT_LIST_HEAD(&rcu_torture_freelist); 2486 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2487 rcu_tortures[i].rtort_mbtest = 0; 2488 list_add_tail(&rcu_tortures[i].rtort_free, 2489 &rcu_torture_freelist); 2490 } 2491 2492 /* Initialize the statistics so that each run gets its own numbers. */ 2493 2494 rcu_torture_current = NULL; 2495 rcu_torture_current_version = 0; 2496 atomic_set(&n_rcu_torture_alloc, 0); 2497 atomic_set(&n_rcu_torture_alloc_fail, 0); 2498 atomic_set(&n_rcu_torture_free, 0); 2499 atomic_set(&n_rcu_torture_mberror, 0); 2500 atomic_set(&n_rcu_torture_error, 0); 2501 n_rcu_torture_barrier_error = 0; 2502 n_rcu_torture_boost_ktrerror = 0; 2503 n_rcu_torture_boost_rterror = 0; 2504 n_rcu_torture_boost_failure = 0; 2505 n_rcu_torture_boosts = 0; 2506 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2507 atomic_set(&rcu_torture_wcount[i], 0); 2508 for_each_possible_cpu(cpu) { 2509 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2510 per_cpu(rcu_torture_count, cpu)[i] = 0; 2511 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2512 } 2513 } 2514 err_segs_recorded = 0; 2515 rt_read_nsegs = 0; 2516 2517 /* Start up the kthreads. */ 2518 2519 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2520 writer_task); 2521 if (firsterr) 2522 goto unwind; 2523 if (nfakewriters > 0) { 2524 fakewriter_tasks = kcalloc(nfakewriters, 2525 sizeof(fakewriter_tasks[0]), 2526 GFP_KERNEL); 2527 if (fakewriter_tasks == NULL) { 2528 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2529 firsterr = -ENOMEM; 2530 goto unwind; 2531 } 2532 } 2533 for (i = 0; i < nfakewriters; i++) { 2534 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2535 NULL, fakewriter_tasks[i]); 2536 if (firsterr) 2537 goto unwind; 2538 } 2539 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2540 GFP_KERNEL); 2541 if (reader_tasks == NULL) { 2542 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2543 firsterr = -ENOMEM; 2544 goto unwind; 2545 } 2546 for (i = 0; i < nrealreaders; i++) { 2547 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2548 reader_tasks[i]); 2549 if (firsterr) 2550 goto unwind; 2551 } 2552 if (stat_interval > 0) { 2553 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2554 stats_task); 2555 if (firsterr) 2556 goto unwind; 2557 } 2558 if (test_no_idle_hz && shuffle_interval > 0) { 2559 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2560 if (firsterr) 2561 goto unwind; 2562 } 2563 if (stutter < 0) 2564 stutter = 0; 2565 if (stutter) { 2566 int t; 2567 2568 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2569 firsterr = torture_stutter_init(stutter * HZ, t); 2570 if (firsterr) 2571 goto unwind; 2572 } 2573 if (fqs_duration < 0) 2574 fqs_duration = 0; 2575 if (fqs_duration) { 2576 /* Create the fqs thread */ 2577 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2578 fqs_task); 2579 if (firsterr) 2580 goto unwind; 2581 } 2582 if (test_boost_interval < 1) 2583 test_boost_interval = 1; 2584 if (test_boost_duration < 2) 2585 test_boost_duration = 2; 2586 if (rcu_torture_can_boost()) { 2587 2588 boost_starttime = jiffies + test_boost_interval * HZ; 2589 2590 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2591 rcutorture_booster_init, 2592 rcutorture_booster_cleanup); 2593 if (firsterr < 0) 2594 goto unwind; 2595 rcutor_hp = firsterr; 2596 } 2597 shutdown_jiffies = jiffies + shutdown_secs * HZ; 2598 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2599 if (firsterr) 2600 goto unwind; 2601 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2602 rcutorture_sync); 2603 if (firsterr) 2604 goto unwind; 2605 firsterr = rcu_torture_stall_init(); 2606 if (firsterr) 2607 goto unwind; 2608 firsterr = rcu_torture_fwd_prog_init(); 2609 if (firsterr) 2610 goto unwind; 2611 firsterr = rcu_torture_barrier_init(); 2612 if (firsterr) 2613 goto unwind; 2614 if (object_debug) 2615 rcu_test_debug_objects(); 2616 torture_init_end(); 2617 return 0; 2618 2619 unwind: 2620 torture_init_end(); 2621 rcu_torture_cleanup(); 2622 return firsterr; 2623 } 2624 2625 module_init(rcu_torture_init); 2626 module_exit(rcu_torture_cleanup); 2627