1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 #include <linux/rcupdate_trace.h> 49 50 #include "rcu.h" 51 52 MODULE_LICENSE("GPL"); 53 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 54 55 /* Bits for ->extendables field, extendables param, and related definitions. */ 56 #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ 57 #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) 58 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 59 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 60 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 61 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 62 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 63 #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ 64 #define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ 65 #define RCUTORTURE_MAX_EXTEND \ 66 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 67 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 69 /* Must be power of two minus one. */ 70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 71 72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 73 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 74 torture_param(int, fqs_duration, 0, 75 "Duration of fqs bursts (us), 0 to disable"); 76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); 79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 80 torture_param(int, fwd_progress_holdoff, 60, 81 "Time between forward-progress tests (s)"); 82 torture_param(bool, fwd_progress_need_resched, 1, 83 "Hide cond_resched() behind need_resched()"); 84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 86 torture_param(bool, gp_normal, false, 87 "Use normal (non-expedited) GP wait primitives"); 88 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 89 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 90 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers"); 91 torture_param(int, n_barrier_cbs, 0, 92 "# of callbacks/kthreads for barrier testing"); 93 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 94 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 95 torture_param(int, object_debug, 0, 96 "Enable debug-object double call_rcu() testing"); 97 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 98 torture_param(int, onoff_interval, 0, 99 "Time between CPU hotplugs (jiffies), 0=disable"); 100 torture_param(int, read_exit_delay, 13, 101 "Delay between read-then-exit episodes (s)"); 102 torture_param(int, read_exit_burst, 16, 103 "# of read-then-exit bursts per episode, zero to disable"); 104 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 105 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 106 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 107 torture_param(int, stall_cpu_holdoff, 10, 108 "Time to wait before starting stall (s)."); 109 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 110 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 111 torture_param(int, stall_gp_kthread, 0, 112 "Grace-period kthread stall duration (s)."); 113 torture_param(int, stat_interval, 60, 114 "Number of seconds between stats printk()s"); 115 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 116 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 117 torture_param(int, test_boost_duration, 4, 118 "Duration of each boost test, seconds."); 119 torture_param(int, test_boost_interval, 7, 120 "Interval between boost tests, seconds."); 121 torture_param(bool, test_no_idle_hz, true, 122 "Test support for tickless idle CPUs"); 123 torture_param(int, verbose, 1, 124 "Enable verbose debugging printk()s"); 125 126 static char *torture_type = "rcu"; 127 module_param(torture_type, charp, 0444); 128 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 129 130 static int nrealreaders; 131 static struct task_struct *writer_task; 132 static struct task_struct **fakewriter_tasks; 133 static struct task_struct **reader_tasks; 134 static struct task_struct *stats_task; 135 static struct task_struct *fqs_task; 136 static struct task_struct *boost_tasks[NR_CPUS]; 137 static struct task_struct *stall_task; 138 static struct task_struct *fwd_prog_task; 139 static struct task_struct **barrier_cbs_tasks; 140 static struct task_struct *barrier_task; 141 static struct task_struct *read_exit_task; 142 143 #define RCU_TORTURE_PIPE_LEN 10 144 145 struct rcu_torture { 146 struct rcu_head rtort_rcu; 147 int rtort_pipe_count; 148 struct list_head rtort_free; 149 int rtort_mbtest; 150 }; 151 152 static LIST_HEAD(rcu_torture_freelist); 153 static struct rcu_torture __rcu *rcu_torture_current; 154 static unsigned long rcu_torture_current_version; 155 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 156 static DEFINE_SPINLOCK(rcu_torture_lock); 157 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 158 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 159 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 160 static atomic_t n_rcu_torture_alloc; 161 static atomic_t n_rcu_torture_alloc_fail; 162 static atomic_t n_rcu_torture_free; 163 static atomic_t n_rcu_torture_mberror; 164 static atomic_t n_rcu_torture_error; 165 static long n_rcu_torture_barrier_error; 166 static long n_rcu_torture_boost_ktrerror; 167 static long n_rcu_torture_boost_rterror; 168 static long n_rcu_torture_boost_failure; 169 static long n_rcu_torture_boosts; 170 static atomic_long_t n_rcu_torture_timers; 171 static long n_barrier_attempts; 172 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 173 static unsigned long n_read_exits; 174 static struct list_head rcu_torture_removed; 175 static unsigned long shutdown_jiffies; 176 static unsigned long start_gp_seq; 177 178 static int rcu_torture_writer_state; 179 #define RTWS_FIXED_DELAY 0 180 #define RTWS_DELAY 1 181 #define RTWS_REPLACE 2 182 #define RTWS_DEF_FREE 3 183 #define RTWS_EXP_SYNC 4 184 #define RTWS_COND_GET 5 185 #define RTWS_COND_SYNC 6 186 #define RTWS_SYNC 7 187 #define RTWS_STUTTER 8 188 #define RTWS_STOPPING 9 189 static const char * const rcu_torture_writer_state_names[] = { 190 "RTWS_FIXED_DELAY", 191 "RTWS_DELAY", 192 "RTWS_REPLACE", 193 "RTWS_DEF_FREE", 194 "RTWS_EXP_SYNC", 195 "RTWS_COND_GET", 196 "RTWS_COND_SYNC", 197 "RTWS_SYNC", 198 "RTWS_STUTTER", 199 "RTWS_STOPPING", 200 }; 201 202 /* Record reader segment types and duration for first failing read. */ 203 struct rt_read_seg { 204 int rt_readstate; 205 unsigned long rt_delay_jiffies; 206 unsigned long rt_delay_ms; 207 unsigned long rt_delay_us; 208 bool rt_preempted; 209 }; 210 static int err_segs_recorded; 211 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 212 static int rt_read_nsegs; 213 214 static const char *rcu_torture_writer_state_getname(void) 215 { 216 unsigned int i = READ_ONCE(rcu_torture_writer_state); 217 218 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 219 return "???"; 220 return rcu_torture_writer_state_names[i]; 221 } 222 223 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) 224 #define rcu_can_boost() 1 225 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 226 #define rcu_can_boost() 0 227 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 228 229 #ifdef CONFIG_RCU_TRACE 230 static u64 notrace rcu_trace_clock_local(void) 231 { 232 u64 ts = trace_clock_local(); 233 234 (void)do_div(ts, NSEC_PER_USEC); 235 return ts; 236 } 237 #else /* #ifdef CONFIG_RCU_TRACE */ 238 static u64 notrace rcu_trace_clock_local(void) 239 { 240 return 0ULL; 241 } 242 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 243 244 /* 245 * Stop aggressive CPU-hog tests a bit before the end of the test in order 246 * to avoid interfering with test shutdown. 247 */ 248 static bool shutdown_time_arrived(void) 249 { 250 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 251 } 252 253 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 254 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 255 /* and boost task create/destroy. */ 256 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 257 static bool barrier_phase; /* Test phase. */ 258 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 259 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 260 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 261 262 static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 263 264 /* 265 * Allocate an element from the rcu_tortures pool. 266 */ 267 static struct rcu_torture * 268 rcu_torture_alloc(void) 269 { 270 struct list_head *p; 271 272 spin_lock_bh(&rcu_torture_lock); 273 if (list_empty(&rcu_torture_freelist)) { 274 atomic_inc(&n_rcu_torture_alloc_fail); 275 spin_unlock_bh(&rcu_torture_lock); 276 return NULL; 277 } 278 atomic_inc(&n_rcu_torture_alloc); 279 p = rcu_torture_freelist.next; 280 list_del_init(p); 281 spin_unlock_bh(&rcu_torture_lock); 282 return container_of(p, struct rcu_torture, rtort_free); 283 } 284 285 /* 286 * Free an element to the rcu_tortures pool. 287 */ 288 static void 289 rcu_torture_free(struct rcu_torture *p) 290 { 291 atomic_inc(&n_rcu_torture_free); 292 spin_lock_bh(&rcu_torture_lock); 293 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 294 spin_unlock_bh(&rcu_torture_lock); 295 } 296 297 /* 298 * Operations vector for selecting different types of tests. 299 */ 300 301 struct rcu_torture_ops { 302 int ttype; 303 void (*init)(void); 304 void (*cleanup)(void); 305 int (*readlock)(void); 306 void (*read_delay)(struct torture_random_state *rrsp, 307 struct rt_read_seg *rtrsp); 308 void (*readunlock)(int idx); 309 unsigned long (*get_gp_seq)(void); 310 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 311 void (*deferred_free)(struct rcu_torture *p); 312 void (*sync)(void); 313 void (*exp_sync)(void); 314 unsigned long (*get_state)(void); 315 void (*cond_sync)(unsigned long oldstate); 316 call_rcu_func_t call; 317 void (*cb_barrier)(void); 318 void (*fqs)(void); 319 void (*stats)(void); 320 int (*stall_dur)(void); 321 int irq_capable; 322 int can_boost; 323 int extendables; 324 int slow_gps; 325 const char *name; 326 }; 327 328 static struct rcu_torture_ops *cur_ops; 329 330 /* 331 * Definitions for rcu torture testing. 332 */ 333 334 static int rcu_torture_read_lock(void) __acquires(RCU) 335 { 336 rcu_read_lock(); 337 return 0; 338 } 339 340 static void 341 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 342 { 343 unsigned long started; 344 unsigned long completed; 345 const unsigned long shortdelay_us = 200; 346 unsigned long longdelay_ms = 300; 347 unsigned long long ts; 348 349 /* We want a short delay sometimes to make a reader delay the grace 350 * period, and we want a long delay occasionally to trigger 351 * force_quiescent_state. */ 352 353 if (!READ_ONCE(rcu_fwd_cb_nodelay) && 354 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 355 started = cur_ops->get_gp_seq(); 356 ts = rcu_trace_clock_local(); 357 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 358 longdelay_ms = 5; /* Avoid triggering BH limits. */ 359 mdelay(longdelay_ms); 360 rtrsp->rt_delay_ms = longdelay_ms; 361 completed = cur_ops->get_gp_seq(); 362 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 363 started, completed); 364 } 365 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 366 udelay(shortdelay_us); 367 rtrsp->rt_delay_us = shortdelay_us; 368 } 369 if (!preempt_count() && 370 !(torture_random(rrsp) % (nrealreaders * 500))) { 371 torture_preempt_schedule(); /* QS only if preemptible. */ 372 rtrsp->rt_preempted = true; 373 } 374 } 375 376 static void rcu_torture_read_unlock(int idx) __releases(RCU) 377 { 378 rcu_read_unlock(); 379 } 380 381 /* 382 * Update callback in the pipe. This should be invoked after a grace period. 383 */ 384 static bool 385 rcu_torture_pipe_update_one(struct rcu_torture *rp) 386 { 387 int i; 388 389 i = READ_ONCE(rp->rtort_pipe_count); 390 if (i > RCU_TORTURE_PIPE_LEN) 391 i = RCU_TORTURE_PIPE_LEN; 392 atomic_inc(&rcu_torture_wcount[i]); 393 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 394 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 395 rp->rtort_mbtest = 0; 396 return true; 397 } 398 return false; 399 } 400 401 /* 402 * Update all callbacks in the pipe. Suitable for synchronous grace-period 403 * primitives. 404 */ 405 static void 406 rcu_torture_pipe_update(struct rcu_torture *old_rp) 407 { 408 struct rcu_torture *rp; 409 struct rcu_torture *rp1; 410 411 if (old_rp) 412 list_add(&old_rp->rtort_free, &rcu_torture_removed); 413 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 414 if (rcu_torture_pipe_update_one(rp)) { 415 list_del(&rp->rtort_free); 416 rcu_torture_free(rp); 417 } 418 } 419 } 420 421 static void 422 rcu_torture_cb(struct rcu_head *p) 423 { 424 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 425 426 if (torture_must_stop_irq()) { 427 /* Test is ending, just drop callbacks on the floor. */ 428 /* The next initialization will pick up the pieces. */ 429 return; 430 } 431 if (rcu_torture_pipe_update_one(rp)) 432 rcu_torture_free(rp); 433 else 434 cur_ops->deferred_free(rp); 435 } 436 437 static unsigned long rcu_no_completed(void) 438 { 439 return 0; 440 } 441 442 static void rcu_torture_deferred_free(struct rcu_torture *p) 443 { 444 call_rcu(&p->rtort_rcu, rcu_torture_cb); 445 } 446 447 static void rcu_sync_torture_init(void) 448 { 449 INIT_LIST_HEAD(&rcu_torture_removed); 450 } 451 452 static struct rcu_torture_ops rcu_ops = { 453 .ttype = RCU_FLAVOR, 454 .init = rcu_sync_torture_init, 455 .readlock = rcu_torture_read_lock, 456 .read_delay = rcu_read_delay, 457 .readunlock = rcu_torture_read_unlock, 458 .get_gp_seq = rcu_get_gp_seq, 459 .gp_diff = rcu_seq_diff, 460 .deferred_free = rcu_torture_deferred_free, 461 .sync = synchronize_rcu, 462 .exp_sync = synchronize_rcu_expedited, 463 .get_state = get_state_synchronize_rcu, 464 .cond_sync = cond_synchronize_rcu, 465 .call = call_rcu, 466 .cb_barrier = rcu_barrier, 467 .fqs = rcu_force_quiescent_state, 468 .stats = NULL, 469 .stall_dur = rcu_jiffies_till_stall_check, 470 .irq_capable = 1, 471 .can_boost = rcu_can_boost(), 472 .extendables = RCUTORTURE_MAX_EXTEND, 473 .name = "rcu" 474 }; 475 476 /* 477 * Don't even think about trying any of these in real life!!! 478 * The names includes "busted", and they really means it! 479 * The only purpose of these functions is to provide a buggy RCU 480 * implementation to make sure that rcutorture correctly emits 481 * buggy-RCU error messages. 482 */ 483 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 484 { 485 /* This is a deliberate bug for testing purposes only! */ 486 rcu_torture_cb(&p->rtort_rcu); 487 } 488 489 static void synchronize_rcu_busted(void) 490 { 491 /* This is a deliberate bug for testing purposes only! */ 492 } 493 494 static void 495 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 496 { 497 /* This is a deliberate bug for testing purposes only! */ 498 func(head); 499 } 500 501 static struct rcu_torture_ops rcu_busted_ops = { 502 .ttype = INVALID_RCU_FLAVOR, 503 .init = rcu_sync_torture_init, 504 .readlock = rcu_torture_read_lock, 505 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 506 .readunlock = rcu_torture_read_unlock, 507 .get_gp_seq = rcu_no_completed, 508 .deferred_free = rcu_busted_torture_deferred_free, 509 .sync = synchronize_rcu_busted, 510 .exp_sync = synchronize_rcu_busted, 511 .call = call_rcu_busted, 512 .cb_barrier = NULL, 513 .fqs = NULL, 514 .stats = NULL, 515 .irq_capable = 1, 516 .name = "busted" 517 }; 518 519 /* 520 * Definitions for srcu torture testing. 521 */ 522 523 DEFINE_STATIC_SRCU(srcu_ctl); 524 static struct srcu_struct srcu_ctld; 525 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 526 527 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 528 { 529 return srcu_read_lock(srcu_ctlp); 530 } 531 532 static void 533 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 534 { 535 long delay; 536 const long uspertick = 1000000 / HZ; 537 const long longdelay = 10; 538 539 /* We want there to be long-running readers, but not all the time. */ 540 541 delay = torture_random(rrsp) % 542 (nrealreaders * 2 * longdelay * uspertick); 543 if (!delay && in_task()) { 544 schedule_timeout_interruptible(longdelay); 545 rtrsp->rt_delay_jiffies = longdelay; 546 } else { 547 rcu_read_delay(rrsp, rtrsp); 548 } 549 } 550 551 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 552 { 553 srcu_read_unlock(srcu_ctlp, idx); 554 } 555 556 static unsigned long srcu_torture_completed(void) 557 { 558 return srcu_batches_completed(srcu_ctlp); 559 } 560 561 static void srcu_torture_deferred_free(struct rcu_torture *rp) 562 { 563 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 564 } 565 566 static void srcu_torture_synchronize(void) 567 { 568 synchronize_srcu(srcu_ctlp); 569 } 570 571 static void srcu_torture_call(struct rcu_head *head, 572 rcu_callback_t func) 573 { 574 call_srcu(srcu_ctlp, head, func); 575 } 576 577 static void srcu_torture_barrier(void) 578 { 579 srcu_barrier(srcu_ctlp); 580 } 581 582 static void srcu_torture_stats(void) 583 { 584 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 585 } 586 587 static void srcu_torture_synchronize_expedited(void) 588 { 589 synchronize_srcu_expedited(srcu_ctlp); 590 } 591 592 static struct rcu_torture_ops srcu_ops = { 593 .ttype = SRCU_FLAVOR, 594 .init = rcu_sync_torture_init, 595 .readlock = srcu_torture_read_lock, 596 .read_delay = srcu_read_delay, 597 .readunlock = srcu_torture_read_unlock, 598 .get_gp_seq = srcu_torture_completed, 599 .deferred_free = srcu_torture_deferred_free, 600 .sync = srcu_torture_synchronize, 601 .exp_sync = srcu_torture_synchronize_expedited, 602 .call = srcu_torture_call, 603 .cb_barrier = srcu_torture_barrier, 604 .stats = srcu_torture_stats, 605 .irq_capable = 1, 606 .name = "srcu" 607 }; 608 609 static void srcu_torture_init(void) 610 { 611 rcu_sync_torture_init(); 612 WARN_ON(init_srcu_struct(&srcu_ctld)); 613 srcu_ctlp = &srcu_ctld; 614 } 615 616 static void srcu_torture_cleanup(void) 617 { 618 cleanup_srcu_struct(&srcu_ctld); 619 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 620 } 621 622 /* As above, but dynamically allocated. */ 623 static struct rcu_torture_ops srcud_ops = { 624 .ttype = SRCU_FLAVOR, 625 .init = srcu_torture_init, 626 .cleanup = srcu_torture_cleanup, 627 .readlock = srcu_torture_read_lock, 628 .read_delay = srcu_read_delay, 629 .readunlock = srcu_torture_read_unlock, 630 .get_gp_seq = srcu_torture_completed, 631 .deferred_free = srcu_torture_deferred_free, 632 .sync = srcu_torture_synchronize, 633 .exp_sync = srcu_torture_synchronize_expedited, 634 .call = srcu_torture_call, 635 .cb_barrier = srcu_torture_barrier, 636 .stats = srcu_torture_stats, 637 .irq_capable = 1, 638 .name = "srcud" 639 }; 640 641 /* As above, but broken due to inappropriate reader extension. */ 642 static struct rcu_torture_ops busted_srcud_ops = { 643 .ttype = SRCU_FLAVOR, 644 .init = srcu_torture_init, 645 .cleanup = srcu_torture_cleanup, 646 .readlock = srcu_torture_read_lock, 647 .read_delay = rcu_read_delay, 648 .readunlock = srcu_torture_read_unlock, 649 .get_gp_seq = srcu_torture_completed, 650 .deferred_free = srcu_torture_deferred_free, 651 .sync = srcu_torture_synchronize, 652 .exp_sync = srcu_torture_synchronize_expedited, 653 .call = srcu_torture_call, 654 .cb_barrier = srcu_torture_barrier, 655 .stats = srcu_torture_stats, 656 .irq_capable = 1, 657 .extendables = RCUTORTURE_MAX_EXTEND, 658 .name = "busted_srcud" 659 }; 660 661 /* 662 * Definitions for RCU-tasks torture testing. 663 */ 664 665 static int tasks_torture_read_lock(void) 666 { 667 return 0; 668 } 669 670 static void tasks_torture_read_unlock(int idx) 671 { 672 } 673 674 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 675 { 676 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 677 } 678 679 static void synchronize_rcu_mult_test(void) 680 { 681 synchronize_rcu_mult(call_rcu_tasks, call_rcu); 682 } 683 684 static struct rcu_torture_ops tasks_ops = { 685 .ttype = RCU_TASKS_FLAVOR, 686 .init = rcu_sync_torture_init, 687 .readlock = tasks_torture_read_lock, 688 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 689 .readunlock = tasks_torture_read_unlock, 690 .get_gp_seq = rcu_no_completed, 691 .deferred_free = rcu_tasks_torture_deferred_free, 692 .sync = synchronize_rcu_tasks, 693 .exp_sync = synchronize_rcu_mult_test, 694 .call = call_rcu_tasks, 695 .cb_barrier = rcu_barrier_tasks, 696 .fqs = NULL, 697 .stats = NULL, 698 .irq_capable = 1, 699 .slow_gps = 1, 700 .name = "tasks" 701 }; 702 703 /* 704 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 705 * This implementation does not necessarily work well with CPU hotplug. 706 */ 707 708 static void synchronize_rcu_trivial(void) 709 { 710 int cpu; 711 712 for_each_online_cpu(cpu) { 713 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 714 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 715 } 716 } 717 718 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 719 { 720 preempt_disable(); 721 return 0; 722 } 723 724 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 725 { 726 preempt_enable(); 727 } 728 729 static struct rcu_torture_ops trivial_ops = { 730 .ttype = RCU_TRIVIAL_FLAVOR, 731 .init = rcu_sync_torture_init, 732 .readlock = rcu_torture_read_lock_trivial, 733 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 734 .readunlock = rcu_torture_read_unlock_trivial, 735 .get_gp_seq = rcu_no_completed, 736 .sync = synchronize_rcu_trivial, 737 .exp_sync = synchronize_rcu_trivial, 738 .fqs = NULL, 739 .stats = NULL, 740 .irq_capable = 1, 741 .name = "trivial" 742 }; 743 744 /* 745 * Definitions for rude RCU-tasks torture testing. 746 */ 747 748 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 749 { 750 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 751 } 752 753 static struct rcu_torture_ops tasks_rude_ops = { 754 .ttype = RCU_TASKS_RUDE_FLAVOR, 755 .init = rcu_sync_torture_init, 756 .readlock = rcu_torture_read_lock_trivial, 757 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 758 .readunlock = rcu_torture_read_unlock_trivial, 759 .get_gp_seq = rcu_no_completed, 760 .deferred_free = rcu_tasks_rude_torture_deferred_free, 761 .sync = synchronize_rcu_tasks_rude, 762 .exp_sync = synchronize_rcu_tasks_rude, 763 .call = call_rcu_tasks_rude, 764 .cb_barrier = rcu_barrier_tasks_rude, 765 .fqs = NULL, 766 .stats = NULL, 767 .irq_capable = 1, 768 .name = "tasks-rude" 769 }; 770 771 /* 772 * Definitions for tracing RCU-tasks torture testing. 773 */ 774 775 static int tasks_tracing_torture_read_lock(void) 776 { 777 rcu_read_lock_trace(); 778 return 0; 779 } 780 781 static void tasks_tracing_torture_read_unlock(int idx) 782 { 783 rcu_read_unlock_trace(); 784 } 785 786 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 787 { 788 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 789 } 790 791 static struct rcu_torture_ops tasks_tracing_ops = { 792 .ttype = RCU_TASKS_TRACING_FLAVOR, 793 .init = rcu_sync_torture_init, 794 .readlock = tasks_tracing_torture_read_lock, 795 .read_delay = srcu_read_delay, /* just reuse srcu's version. */ 796 .readunlock = tasks_tracing_torture_read_unlock, 797 .get_gp_seq = rcu_no_completed, 798 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 799 .sync = synchronize_rcu_tasks_trace, 800 .exp_sync = synchronize_rcu_tasks_trace, 801 .call = call_rcu_tasks_trace, 802 .cb_barrier = rcu_barrier_tasks_trace, 803 .fqs = NULL, 804 .stats = NULL, 805 .irq_capable = 1, 806 .slow_gps = 1, 807 .name = "tasks-tracing" 808 }; 809 810 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 811 { 812 if (!cur_ops->gp_diff) 813 return new - old; 814 return cur_ops->gp_diff(new, old); 815 } 816 817 static bool __maybe_unused torturing_tasks(void) 818 { 819 return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops; 820 } 821 822 /* 823 * RCU torture priority-boost testing. Runs one real-time thread per 824 * CPU for moderate bursts, repeatedly registering RCU callbacks and 825 * spinning waiting for them to be invoked. If a given callback takes 826 * too long to be invoked, we assume that priority inversion has occurred. 827 */ 828 829 struct rcu_boost_inflight { 830 struct rcu_head rcu; 831 int inflight; 832 }; 833 834 static void rcu_torture_boost_cb(struct rcu_head *head) 835 { 836 struct rcu_boost_inflight *rbip = 837 container_of(head, struct rcu_boost_inflight, rcu); 838 839 /* Ensure RCU-core accesses precede clearing ->inflight */ 840 smp_store_release(&rbip->inflight, 0); 841 } 842 843 static int old_rt_runtime = -1; 844 845 static void rcu_torture_disable_rt_throttle(void) 846 { 847 /* 848 * Disable RT throttling so that rcutorture's boost threads don't get 849 * throttled. Only possible if rcutorture is built-in otherwise the 850 * user should manually do this by setting the sched_rt_period_us and 851 * sched_rt_runtime sysctls. 852 */ 853 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 854 return; 855 856 old_rt_runtime = sysctl_sched_rt_runtime; 857 sysctl_sched_rt_runtime = -1; 858 } 859 860 static void rcu_torture_enable_rt_throttle(void) 861 { 862 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 863 return; 864 865 sysctl_sched_rt_runtime = old_rt_runtime; 866 old_rt_runtime = -1; 867 } 868 869 static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) 870 { 871 if (end - start > test_boost_duration * HZ - HZ / 2) { 872 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 873 n_rcu_torture_boost_failure++; 874 875 return true; /* failed */ 876 } 877 878 return false; /* passed */ 879 } 880 881 static int rcu_torture_boost(void *arg) 882 { 883 unsigned long call_rcu_time; 884 unsigned long endtime; 885 unsigned long oldstarttime; 886 struct rcu_boost_inflight rbi = { .inflight = 0 }; 887 888 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 889 890 /* Set real-time priority. */ 891 sched_set_fifo_low(current); 892 893 init_rcu_head_on_stack(&rbi.rcu); 894 /* Each pass through the following loop does one boost-test cycle. */ 895 do { 896 /* Track if the test failed already in this test interval? */ 897 bool failed = false; 898 899 /* Increment n_rcu_torture_boosts once per boost-test */ 900 while (!kthread_should_stop()) { 901 if (mutex_trylock(&boost_mutex)) { 902 n_rcu_torture_boosts++; 903 mutex_unlock(&boost_mutex); 904 break; 905 } 906 schedule_timeout_uninterruptible(1); 907 } 908 if (kthread_should_stop()) 909 goto checkwait; 910 911 /* Wait for the next test interval. */ 912 oldstarttime = boost_starttime; 913 while (time_before(jiffies, oldstarttime)) { 914 schedule_timeout_interruptible(oldstarttime - jiffies); 915 stutter_wait("rcu_torture_boost"); 916 if (torture_must_stop()) 917 goto checkwait; 918 } 919 920 /* Do one boost-test interval. */ 921 endtime = oldstarttime + test_boost_duration * HZ; 922 call_rcu_time = jiffies; 923 while (time_before(jiffies, endtime)) { 924 /* If we don't have a callback in flight, post one. */ 925 if (!smp_load_acquire(&rbi.inflight)) { 926 /* RCU core before ->inflight = 1. */ 927 smp_store_release(&rbi.inflight, 1); 928 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 929 /* Check if the boost test failed */ 930 failed = failed || 931 rcu_torture_boost_failed(call_rcu_time, 932 jiffies); 933 call_rcu_time = jiffies; 934 } 935 stutter_wait("rcu_torture_boost"); 936 if (torture_must_stop()) 937 goto checkwait; 938 } 939 940 /* 941 * If boost never happened, then inflight will always be 1, in 942 * this case the boost check would never happen in the above 943 * loop so do another one here. 944 */ 945 if (!failed && smp_load_acquire(&rbi.inflight)) 946 rcu_torture_boost_failed(call_rcu_time, jiffies); 947 948 /* 949 * Set the start time of the next test interval. 950 * Yes, this is vulnerable to long delays, but such 951 * delays simply cause a false negative for the next 952 * interval. Besides, we are running at RT priority, 953 * so delays should be relatively rare. 954 */ 955 while (oldstarttime == boost_starttime && 956 !kthread_should_stop()) { 957 if (mutex_trylock(&boost_mutex)) { 958 boost_starttime = jiffies + 959 test_boost_interval * HZ; 960 mutex_unlock(&boost_mutex); 961 break; 962 } 963 schedule_timeout_uninterruptible(1); 964 } 965 966 /* Go do the stutter. */ 967 checkwait: stutter_wait("rcu_torture_boost"); 968 } while (!torture_must_stop()); 969 970 /* Clean up and exit. */ 971 while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { 972 torture_shutdown_absorb("rcu_torture_boost"); 973 schedule_timeout_uninterruptible(1); 974 } 975 destroy_rcu_head_on_stack(&rbi.rcu); 976 torture_kthread_stopping("rcu_torture_boost"); 977 return 0; 978 } 979 980 /* 981 * RCU torture force-quiescent-state kthread. Repeatedly induces 982 * bursts of calls to force_quiescent_state(), increasing the probability 983 * of occurrence of some important types of race conditions. 984 */ 985 static int 986 rcu_torture_fqs(void *arg) 987 { 988 unsigned long fqs_resume_time; 989 int fqs_burst_remaining; 990 991 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 992 do { 993 fqs_resume_time = jiffies + fqs_stutter * HZ; 994 while (time_before(jiffies, fqs_resume_time) && 995 !kthread_should_stop()) { 996 schedule_timeout_interruptible(1); 997 } 998 fqs_burst_remaining = fqs_duration; 999 while (fqs_burst_remaining > 0 && 1000 !kthread_should_stop()) { 1001 cur_ops->fqs(); 1002 udelay(fqs_holdoff); 1003 fqs_burst_remaining -= fqs_holdoff; 1004 } 1005 stutter_wait("rcu_torture_fqs"); 1006 } while (!torture_must_stop()); 1007 torture_kthread_stopping("rcu_torture_fqs"); 1008 return 0; 1009 } 1010 1011 /* 1012 * RCU torture writer kthread. Repeatedly substitutes a new structure 1013 * for that pointed to by rcu_torture_current, freeing the old structure 1014 * after a series of grace periods (the "pipeline"). 1015 */ 1016 static int 1017 rcu_torture_writer(void *arg) 1018 { 1019 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1020 int expediting = 0; 1021 unsigned long gp_snap; 1022 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; 1023 bool gp_sync1 = gp_sync; 1024 int i; 1025 struct rcu_torture *rp; 1026 struct rcu_torture *old_rp; 1027 static DEFINE_TORTURE_RANDOM(rand); 1028 int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, 1029 RTWS_COND_GET, RTWS_SYNC }; 1030 int nsynctypes = 0; 1031 1032 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1033 if (!can_expedite) 1034 pr_alert("%s" TORTURE_FLAG 1035 " GP expediting controlled from boot/sysfs for %s.\n", 1036 torture_type, cur_ops->name); 1037 1038 /* Initialize synctype[] array. If none set, take default. */ 1039 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) 1040 gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; 1041 if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) { 1042 synctype[nsynctypes++] = RTWS_COND_GET; 1043 pr_info("%s: Testing conditional GPs.\n", __func__); 1044 } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) { 1045 pr_alert("%s: gp_cond without primitives.\n", __func__); 1046 } 1047 if (gp_exp1 && cur_ops->exp_sync) { 1048 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1049 pr_info("%s: Testing expedited GPs.\n", __func__); 1050 } else if (gp_exp && !cur_ops->exp_sync) { 1051 pr_alert("%s: gp_exp without primitives.\n", __func__); 1052 } 1053 if (gp_normal1 && cur_ops->deferred_free) { 1054 synctype[nsynctypes++] = RTWS_DEF_FREE; 1055 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1056 } else if (gp_normal && !cur_ops->deferred_free) { 1057 pr_alert("%s: gp_normal without primitives.\n", __func__); 1058 } 1059 if (gp_sync1 && cur_ops->sync) { 1060 synctype[nsynctypes++] = RTWS_SYNC; 1061 pr_info("%s: Testing normal GPs.\n", __func__); 1062 } else if (gp_sync && !cur_ops->sync) { 1063 pr_alert("%s: gp_sync without primitives.\n", __func__); 1064 } 1065 if (WARN_ONCE(nsynctypes == 0, 1066 "rcu_torture_writer: No update-side primitives.\n")) { 1067 /* 1068 * No updates primitives, so don't try updating. 1069 * The resulting test won't be testing much, hence the 1070 * above WARN_ONCE(). 1071 */ 1072 rcu_torture_writer_state = RTWS_STOPPING; 1073 torture_kthread_stopping("rcu_torture_writer"); 1074 } 1075 1076 do { 1077 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1078 schedule_timeout_uninterruptible(1); 1079 rp = rcu_torture_alloc(); 1080 if (rp == NULL) 1081 continue; 1082 rp->rtort_pipe_count = 0; 1083 rcu_torture_writer_state = RTWS_DELAY; 1084 udelay(torture_random(&rand) & 0x3ff); 1085 rcu_torture_writer_state = RTWS_REPLACE; 1086 old_rp = rcu_dereference_check(rcu_torture_current, 1087 current == writer_task); 1088 rp->rtort_mbtest = 1; 1089 rcu_assign_pointer(rcu_torture_current, rp); 1090 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1091 if (old_rp) { 1092 i = old_rp->rtort_pipe_count; 1093 if (i > RCU_TORTURE_PIPE_LEN) 1094 i = RCU_TORTURE_PIPE_LEN; 1095 atomic_inc(&rcu_torture_wcount[i]); 1096 WRITE_ONCE(old_rp->rtort_pipe_count, 1097 old_rp->rtort_pipe_count + 1); 1098 switch (synctype[torture_random(&rand) % nsynctypes]) { 1099 case RTWS_DEF_FREE: 1100 rcu_torture_writer_state = RTWS_DEF_FREE; 1101 cur_ops->deferred_free(old_rp); 1102 break; 1103 case RTWS_EXP_SYNC: 1104 rcu_torture_writer_state = RTWS_EXP_SYNC; 1105 cur_ops->exp_sync(); 1106 rcu_torture_pipe_update(old_rp); 1107 break; 1108 case RTWS_COND_GET: 1109 rcu_torture_writer_state = RTWS_COND_GET; 1110 gp_snap = cur_ops->get_state(); 1111 i = torture_random(&rand) % 16; 1112 if (i != 0) 1113 schedule_timeout_interruptible(i); 1114 udelay(torture_random(&rand) % 1000); 1115 rcu_torture_writer_state = RTWS_COND_SYNC; 1116 cur_ops->cond_sync(gp_snap); 1117 rcu_torture_pipe_update(old_rp); 1118 break; 1119 case RTWS_SYNC: 1120 rcu_torture_writer_state = RTWS_SYNC; 1121 cur_ops->sync(); 1122 rcu_torture_pipe_update(old_rp); 1123 break; 1124 default: 1125 WARN_ON_ONCE(1); 1126 break; 1127 } 1128 } 1129 WRITE_ONCE(rcu_torture_current_version, 1130 rcu_torture_current_version + 1); 1131 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1132 if (can_expedite && 1133 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1134 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1135 if (expediting >= 0) 1136 rcu_expedite_gp(); 1137 else 1138 rcu_unexpedite_gp(); 1139 if (++expediting > 3) 1140 expediting = -expediting; 1141 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1142 can_expedite = !rcu_gp_is_expedited() && 1143 !rcu_gp_is_normal(); 1144 } 1145 rcu_torture_writer_state = RTWS_STUTTER; 1146 if (stutter_wait("rcu_torture_writer") && 1147 !READ_ONCE(rcu_fwd_cb_nodelay) && 1148 !cur_ops->slow_gps && 1149 !torture_must_stop() && 1150 rcu_inkernel_boot_has_ended()) 1151 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1152 if (list_empty(&rcu_tortures[i].rtort_free) && 1153 rcu_access_pointer(rcu_torture_current) != 1154 &rcu_tortures[i]) { 1155 rcu_ftrace_dump(DUMP_ALL); 1156 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1157 } 1158 } while (!torture_must_stop()); 1159 rcu_torture_current = NULL; // Let stats task know that we are done. 1160 /* Reset expediting back to unexpedited. */ 1161 if (expediting > 0) 1162 expediting = -expediting; 1163 while (can_expedite && expediting++ < 0) 1164 rcu_unexpedite_gp(); 1165 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1166 if (!can_expedite) 1167 pr_alert("%s" TORTURE_FLAG 1168 " Dynamic grace-period expediting was disabled.\n", 1169 torture_type); 1170 rcu_torture_writer_state = RTWS_STOPPING; 1171 torture_kthread_stopping("rcu_torture_writer"); 1172 return 0; 1173 } 1174 1175 /* 1176 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1177 * delay between calls. 1178 */ 1179 static int 1180 rcu_torture_fakewriter(void *arg) 1181 { 1182 DEFINE_TORTURE_RANDOM(rand); 1183 1184 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1185 set_user_nice(current, MAX_NICE); 1186 1187 do { 1188 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); 1189 udelay(torture_random(&rand) & 0x3ff); 1190 if (cur_ops->cb_barrier != NULL && 1191 torture_random(&rand) % (nfakewriters * 8) == 0) { 1192 cur_ops->cb_barrier(); 1193 } else if (gp_normal == gp_exp) { 1194 if (cur_ops->sync && torture_random(&rand) & 0x80) 1195 cur_ops->sync(); 1196 else if (cur_ops->exp_sync) 1197 cur_ops->exp_sync(); 1198 } else if (gp_normal && cur_ops->sync) { 1199 cur_ops->sync(); 1200 } else if (cur_ops->exp_sync) { 1201 cur_ops->exp_sync(); 1202 } 1203 stutter_wait("rcu_torture_fakewriter"); 1204 } while (!torture_must_stop()); 1205 1206 torture_kthread_stopping("rcu_torture_fakewriter"); 1207 return 0; 1208 } 1209 1210 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1211 { 1212 kfree(rhp); 1213 } 1214 1215 /* 1216 * Do one extension of an RCU read-side critical section using the 1217 * current reader state in readstate (set to zero for initial entry 1218 * to extended critical section), set the new state as specified by 1219 * newstate (set to zero for final exit from extended critical section), 1220 * and random-number-generator state in trsp. If this is neither the 1221 * beginning or end of the critical section and if there was actually a 1222 * change, do a ->read_delay(). 1223 */ 1224 static void rcutorture_one_extend(int *readstate, int newstate, 1225 struct torture_random_state *trsp, 1226 struct rt_read_seg *rtrsp) 1227 { 1228 unsigned long flags; 1229 int idxnew = -1; 1230 int idxold = *readstate; 1231 int statesnew = ~*readstate & newstate; 1232 int statesold = *readstate & ~newstate; 1233 1234 WARN_ON_ONCE(idxold < 0); 1235 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); 1236 rtrsp->rt_readstate = newstate; 1237 1238 /* First, put new protection in place to avoid critical-section gap. */ 1239 if (statesnew & RCUTORTURE_RDR_BH) 1240 local_bh_disable(); 1241 if (statesnew & RCUTORTURE_RDR_IRQ) 1242 local_irq_disable(); 1243 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1244 preempt_disable(); 1245 if (statesnew & RCUTORTURE_RDR_RBH) 1246 rcu_read_lock_bh(); 1247 if (statesnew & RCUTORTURE_RDR_SCHED) 1248 rcu_read_lock_sched(); 1249 if (statesnew & RCUTORTURE_RDR_RCU) 1250 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; 1251 1252 /* Next, remove old protection, irq first due to bh conflict. */ 1253 if (statesold & RCUTORTURE_RDR_IRQ) 1254 local_irq_enable(); 1255 if (statesold & RCUTORTURE_RDR_BH) 1256 local_bh_enable(); 1257 if (statesold & RCUTORTURE_RDR_PREEMPT) 1258 preempt_enable(); 1259 if (statesold & RCUTORTURE_RDR_RBH) 1260 rcu_read_unlock_bh(); 1261 if (statesold & RCUTORTURE_RDR_SCHED) 1262 rcu_read_unlock_sched(); 1263 if (statesold & RCUTORTURE_RDR_RCU) { 1264 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); 1265 1266 if (lockit) 1267 raw_spin_lock_irqsave(¤t->pi_lock, flags); 1268 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); 1269 if (lockit) 1270 raw_spin_unlock_irqrestore(¤t->pi_lock, flags); 1271 } 1272 1273 /* Delay if neither beginning nor end and there was a change. */ 1274 if ((statesnew || statesold) && *readstate && newstate) 1275 cur_ops->read_delay(trsp, rtrsp); 1276 1277 /* Update the reader state. */ 1278 if (idxnew == -1) 1279 idxnew = idxold & ~RCUTORTURE_RDR_MASK; 1280 WARN_ON_ONCE(idxnew < 0); 1281 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); 1282 *readstate = idxnew | newstate; 1283 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); 1284 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); 1285 } 1286 1287 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1288 static int rcutorture_extend_mask_max(void) 1289 { 1290 int mask; 1291 1292 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1293 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1294 mask = mask | RCUTORTURE_RDR_RCU; 1295 return mask; 1296 } 1297 1298 /* Return a random protection state mask, but with at least one bit set. */ 1299 static int 1300 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1301 { 1302 int mask = rcutorture_extend_mask_max(); 1303 unsigned long randmask1 = torture_random(trsp) >> 8; 1304 unsigned long randmask2 = randmask1 >> 3; 1305 1306 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); 1307 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1308 if (!(randmask1 & 0x7)) 1309 mask = mask & randmask2; 1310 else 1311 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1312 /* Can't enable bh w/irq disabled. */ 1313 if ((mask & RCUTORTURE_RDR_IRQ) && 1314 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || 1315 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) 1316 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1317 return mask ?: RCUTORTURE_RDR_RCU; 1318 } 1319 1320 /* 1321 * Do a randomly selected number of extensions of an existing RCU read-side 1322 * critical section. 1323 */ 1324 static struct rt_read_seg * 1325 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1326 struct rt_read_seg *rtrsp) 1327 { 1328 int i; 1329 int j; 1330 int mask = rcutorture_extend_mask_max(); 1331 1332 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1333 if (!((mask - 1) & mask)) 1334 return rtrsp; /* Current RCU reader not extendable. */ 1335 /* Bias towards larger numbers of loops. */ 1336 i = (torture_random(trsp) >> 3); 1337 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1338 for (j = 0; j < i; j++) { 1339 mask = rcutorture_extend_mask(*readstate, trsp); 1340 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1341 } 1342 return &rtrsp[j]; 1343 } 1344 1345 /* 1346 * Do one read-side critical section, returning false if there was 1347 * no data to read. Can be invoked both from process context and 1348 * from a timer handler. 1349 */ 1350 static bool rcu_torture_one_read(struct torture_random_state *trsp) 1351 { 1352 int i; 1353 unsigned long started; 1354 unsigned long completed; 1355 int newstate; 1356 struct rcu_torture *p; 1357 int pipe_count; 1358 int readstate = 0; 1359 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1360 struct rt_read_seg *rtrsp = &rtseg[0]; 1361 struct rt_read_seg *rtrsp1; 1362 unsigned long long ts; 1363 1364 WARN_ON_ONCE(!rcu_is_watching()); 1365 newstate = rcutorture_extend_mask(readstate, trsp); 1366 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1367 started = cur_ops->get_gp_seq(); 1368 ts = rcu_trace_clock_local(); 1369 p = rcu_dereference_check(rcu_torture_current, 1370 rcu_read_lock_bh_held() || 1371 rcu_read_lock_sched_held() || 1372 srcu_read_lock_held(srcu_ctlp) || 1373 rcu_read_lock_trace_held() || 1374 torturing_tasks()); 1375 if (p == NULL) { 1376 /* Wait for rcu_torture_writer to get underway */ 1377 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1378 return false; 1379 } 1380 if (p->rtort_mbtest == 0) 1381 atomic_inc(&n_rcu_torture_mberror); 1382 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1383 preempt_disable(); 1384 pipe_count = READ_ONCE(p->rtort_pipe_count); 1385 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1386 /* Should not happen, but... */ 1387 pipe_count = RCU_TORTURE_PIPE_LEN; 1388 } 1389 completed = cur_ops->get_gp_seq(); 1390 if (pipe_count > 1) { 1391 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1392 ts, started, completed); 1393 rcu_ftrace_dump(DUMP_ALL); 1394 } 1395 __this_cpu_inc(rcu_torture_count[pipe_count]); 1396 completed = rcutorture_seq_diff(completed, started); 1397 if (completed > RCU_TORTURE_PIPE_LEN) { 1398 /* Should not happen, but... */ 1399 completed = RCU_TORTURE_PIPE_LEN; 1400 } 1401 __this_cpu_inc(rcu_torture_batch[completed]); 1402 preempt_enable(); 1403 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1404 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); 1405 // This next splat is expected behavior if leakpointer, especially 1406 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 1407 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 1408 1409 /* If error or close call, record the sequence of reader protections. */ 1410 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1411 i = 0; 1412 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1413 err_segs[i++] = *rtrsp1; 1414 rt_read_nsegs = i; 1415 } 1416 1417 return true; 1418 } 1419 1420 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 1421 1422 /* 1423 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1424 * incrementing the corresponding element of the pipeline array. The 1425 * counter in the element should never be greater than 1, otherwise, the 1426 * RCU implementation is broken. 1427 */ 1428 static void rcu_torture_timer(struct timer_list *unused) 1429 { 1430 atomic_long_inc(&n_rcu_torture_timers); 1431 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand)); 1432 1433 /* Test call_rcu() invocation from interrupt handler. */ 1434 if (cur_ops->call) { 1435 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 1436 1437 if (rhp) 1438 cur_ops->call(rhp, rcu_torture_timer_cb); 1439 } 1440 } 1441 1442 /* 1443 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 1444 * incrementing the corresponding element of the pipeline array. The 1445 * counter in the element should never be greater than 1, otherwise, the 1446 * RCU implementation is broken. 1447 */ 1448 static int 1449 rcu_torture_reader(void *arg) 1450 { 1451 unsigned long lastsleep = jiffies; 1452 long myid = (long)arg; 1453 int mynumonline = myid; 1454 DEFINE_TORTURE_RANDOM(rand); 1455 struct timer_list t; 1456 1457 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1458 set_user_nice(current, MAX_NICE); 1459 if (irqreader && cur_ops->irq_capable) 1460 timer_setup_on_stack(&t, rcu_torture_timer, 0); 1461 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1462 do { 1463 if (irqreader && cur_ops->irq_capable) { 1464 if (!timer_pending(&t)) 1465 mod_timer(&t, jiffies + 1); 1466 } 1467 if (!rcu_torture_one_read(&rand) && !torture_must_stop()) 1468 schedule_timeout_interruptible(HZ); 1469 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 1470 schedule_timeout_interruptible(1); 1471 lastsleep = jiffies + 10; 1472 } 1473 while (num_online_cpus() < mynumonline && !torture_must_stop()) 1474 schedule_timeout_interruptible(HZ / 5); 1475 stutter_wait("rcu_torture_reader"); 1476 } while (!torture_must_stop()); 1477 if (irqreader && cur_ops->irq_capable) { 1478 del_timer_sync(&t); 1479 destroy_timer_on_stack(&t); 1480 } 1481 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1482 torture_kthread_stopping("rcu_torture_reader"); 1483 return 0; 1484 } 1485 1486 /* 1487 * Print torture statistics. Caller must ensure that there is only 1488 * one call to this function at a given time!!! This is normally 1489 * accomplished by relying on the module system to only have one copy 1490 * of the module loaded, and then by giving the rcu_torture_stats 1491 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1492 * thread is not running). 1493 */ 1494 static void 1495 rcu_torture_stats_print(void) 1496 { 1497 int cpu; 1498 int i; 1499 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1500 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1501 struct rcu_torture *rtcp; 1502 static unsigned long rtcv_snap = ULONG_MAX; 1503 static bool splatted; 1504 struct task_struct *wtp; 1505 1506 for_each_possible_cpu(cpu) { 1507 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1508 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 1509 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 1510 } 1511 } 1512 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1513 if (pipesummary[i] != 0) 1514 break; 1515 } 1516 1517 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1518 rtcp = rcu_access_pointer(rcu_torture_current); 1519 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1520 rtcp, 1521 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 1522 rcu_torture_current_version, 1523 list_empty(&rcu_torture_freelist), 1524 atomic_read(&n_rcu_torture_alloc), 1525 atomic_read(&n_rcu_torture_alloc_fail), 1526 atomic_read(&n_rcu_torture_free)); 1527 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ", 1528 atomic_read(&n_rcu_torture_mberror), 1529 n_rcu_torture_barrier_error, 1530 n_rcu_torture_boost_ktrerror, 1531 n_rcu_torture_boost_rterror); 1532 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1533 n_rcu_torture_boost_failure, 1534 n_rcu_torture_boosts, 1535 atomic_long_read(&n_rcu_torture_timers)); 1536 torture_onoff_stats(); 1537 pr_cont("barrier: %ld/%ld:%ld ", 1538 data_race(n_barrier_successes), 1539 data_race(n_barrier_attempts), 1540 data_race(n_rcu_torture_barrier_error)); 1541 pr_cont("read-exits: %ld\n", data_race(n_read_exits)); 1542 1543 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1544 if (atomic_read(&n_rcu_torture_mberror) || 1545 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1546 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1547 i > 1) { 1548 pr_cont("%s", "!!! "); 1549 atomic_inc(&n_rcu_torture_error); 1550 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1551 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1552 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1553 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1554 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed 1555 WARN_ON_ONCE(i > 1); // Too-short grace period 1556 } 1557 pr_cont("Reader Pipe: "); 1558 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1559 pr_cont(" %ld", pipesummary[i]); 1560 pr_cont("\n"); 1561 1562 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1563 pr_cont("Reader Batch: "); 1564 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1565 pr_cont(" %ld", batchsummary[i]); 1566 pr_cont("\n"); 1567 1568 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1569 pr_cont("Free-Block Circulation: "); 1570 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1571 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1572 } 1573 pr_cont("\n"); 1574 1575 if (cur_ops->stats) 1576 cur_ops->stats(); 1577 if (rtcv_snap == rcu_torture_current_version && 1578 rcu_access_pointer(rcu_torture_current) && 1579 !rcu_stall_is_suppressed()) { 1580 int __maybe_unused flags = 0; 1581 unsigned long __maybe_unused gp_seq = 0; 1582 1583 rcutorture_get_gp_data(cur_ops->ttype, 1584 &flags, &gp_seq); 1585 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1586 &flags, &gp_seq); 1587 wtp = READ_ONCE(writer_task); 1588 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1589 rcu_torture_writer_state_getname(), 1590 rcu_torture_writer_state, gp_seq, flags, 1591 wtp == NULL ? ~0UL : wtp->state, 1592 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1593 if (!splatted && wtp) { 1594 sched_show_task(wtp); 1595 splatted = true; 1596 } 1597 show_rcu_gp_kthreads(); 1598 rcu_ftrace_dump(DUMP_ALL); 1599 } 1600 rtcv_snap = rcu_torture_current_version; 1601 } 1602 1603 /* 1604 * Periodically prints torture statistics, if periodic statistics printing 1605 * was specified via the stat_interval module parameter. 1606 */ 1607 static int 1608 rcu_torture_stats(void *arg) 1609 { 1610 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1611 do { 1612 schedule_timeout_interruptible(stat_interval * HZ); 1613 rcu_torture_stats_print(); 1614 torture_shutdown_absorb("rcu_torture_stats"); 1615 } while (!torture_must_stop()); 1616 torture_kthread_stopping("rcu_torture_stats"); 1617 return 0; 1618 } 1619 1620 static void 1621 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1622 { 1623 pr_alert("%s" TORTURE_FLAG 1624 "--- %s: nreaders=%d nfakewriters=%d " 1625 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1626 "shuffle_interval=%d stutter=%d irqreader=%d " 1627 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1628 "test_boost=%d/%d test_boost_interval=%d " 1629 "test_boost_duration=%d shutdown_secs=%d " 1630 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1631 "stall_cpu_block=%d " 1632 "n_barrier_cbs=%d " 1633 "onoff_interval=%d onoff_holdoff=%d " 1634 "read_exit_delay=%d read_exit_burst=%d\n", 1635 torture_type, tag, nrealreaders, nfakewriters, 1636 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1637 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1638 test_boost, cur_ops->can_boost, 1639 test_boost_interval, test_boost_duration, shutdown_secs, 1640 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1641 stall_cpu_block, 1642 n_barrier_cbs, 1643 onoff_interval, onoff_holdoff, 1644 read_exit_delay, read_exit_burst); 1645 } 1646 1647 static int rcutorture_booster_cleanup(unsigned int cpu) 1648 { 1649 struct task_struct *t; 1650 1651 if (boost_tasks[cpu] == NULL) 1652 return 0; 1653 mutex_lock(&boost_mutex); 1654 t = boost_tasks[cpu]; 1655 boost_tasks[cpu] = NULL; 1656 rcu_torture_enable_rt_throttle(); 1657 mutex_unlock(&boost_mutex); 1658 1659 /* This must be outside of the mutex, otherwise deadlock! */ 1660 torture_stop_kthread(rcu_torture_boost, t); 1661 return 0; 1662 } 1663 1664 static int rcutorture_booster_init(unsigned int cpu) 1665 { 1666 int retval; 1667 1668 if (boost_tasks[cpu] != NULL) 1669 return 0; /* Already created, nothing more to do. */ 1670 1671 /* Don't allow time recalculation while creating a new task. */ 1672 mutex_lock(&boost_mutex); 1673 rcu_torture_disable_rt_throttle(); 1674 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1675 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1676 cpu_to_node(cpu), 1677 "rcu_torture_boost"); 1678 if (IS_ERR(boost_tasks[cpu])) { 1679 retval = PTR_ERR(boost_tasks[cpu]); 1680 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 1681 n_rcu_torture_boost_ktrerror++; 1682 boost_tasks[cpu] = NULL; 1683 mutex_unlock(&boost_mutex); 1684 return retval; 1685 } 1686 kthread_bind(boost_tasks[cpu], cpu); 1687 wake_up_process(boost_tasks[cpu]); 1688 mutex_unlock(&boost_mutex); 1689 return 0; 1690 } 1691 1692 /* 1693 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1694 * induces a CPU stall for the time specified by stall_cpu. 1695 */ 1696 static int rcu_torture_stall(void *args) 1697 { 1698 int idx; 1699 unsigned long stop_at; 1700 1701 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1702 if (stall_cpu_holdoff > 0) { 1703 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1704 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1705 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1706 } 1707 if (!kthread_should_stop() && stall_gp_kthread > 0) { 1708 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 1709 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 1710 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 1711 if (kthread_should_stop()) 1712 break; 1713 schedule_timeout_uninterruptible(HZ); 1714 } 1715 } 1716 if (!kthread_should_stop() && stall_cpu > 0) { 1717 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 1718 stop_at = ktime_get_seconds() + stall_cpu; 1719 /* RCU CPU stall is expected behavior in following code. */ 1720 idx = cur_ops->readlock(); 1721 if (stall_cpu_irqsoff) 1722 local_irq_disable(); 1723 else if (!stall_cpu_block) 1724 preempt_disable(); 1725 pr_alert("rcu_torture_stall start on CPU %d.\n", 1726 raw_smp_processor_id()); 1727 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 1728 stop_at)) 1729 if (stall_cpu_block) 1730 schedule_timeout_uninterruptible(HZ); 1731 if (stall_cpu_irqsoff) 1732 local_irq_enable(); 1733 else if (!stall_cpu_block) 1734 preempt_enable(); 1735 cur_ops->readunlock(idx); 1736 } 1737 pr_alert("rcu_torture_stall end.\n"); 1738 torture_shutdown_absorb("rcu_torture_stall"); 1739 while (!kthread_should_stop()) 1740 schedule_timeout_interruptible(10 * HZ); 1741 return 0; 1742 } 1743 1744 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1745 static int __init rcu_torture_stall_init(void) 1746 { 1747 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 1748 return 0; 1749 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 1750 } 1751 1752 /* State structure for forward-progress self-propagating RCU callback. */ 1753 struct fwd_cb_state { 1754 struct rcu_head rh; 1755 int stop; 1756 }; 1757 1758 /* 1759 * Forward-progress self-propagating RCU callback function. Because 1760 * callbacks run from softirq, this function is an implicit RCU read-side 1761 * critical section. 1762 */ 1763 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 1764 { 1765 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 1766 1767 if (READ_ONCE(fcsp->stop)) { 1768 WRITE_ONCE(fcsp->stop, 2); 1769 return; 1770 } 1771 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 1772 } 1773 1774 /* State for continuous-flood RCU callbacks. */ 1775 struct rcu_fwd_cb { 1776 struct rcu_head rh; 1777 struct rcu_fwd_cb *rfc_next; 1778 struct rcu_fwd *rfc_rfp; 1779 int rfc_gps; 1780 }; 1781 1782 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 1783 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1784 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1785 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 1786 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 1787 1788 struct rcu_launder_hist { 1789 long n_launders; 1790 unsigned long launder_gp_seq; 1791 }; 1792 1793 struct rcu_fwd { 1794 spinlock_t rcu_fwd_lock; 1795 struct rcu_fwd_cb *rcu_fwd_cb_head; 1796 struct rcu_fwd_cb **rcu_fwd_cb_tail; 1797 long n_launders_cb; 1798 unsigned long rcu_fwd_startat; 1799 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 1800 unsigned long rcu_launder_gp_seq_start; 1801 }; 1802 1803 static DEFINE_MUTEX(rcu_fwd_mutex); 1804 static struct rcu_fwd *rcu_fwds; 1805 static bool rcu_fwd_emergency_stop; 1806 1807 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 1808 { 1809 unsigned long gps; 1810 unsigned long gps_old; 1811 int i; 1812 int j; 1813 1814 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 1815 if (rfp->n_launders_hist[i].n_launders > 0) 1816 break; 1817 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 1818 __func__, jiffies - rfp->rcu_fwd_startat); 1819 gps_old = rfp->rcu_launder_gp_seq_start; 1820 for (j = 0; j <= i; j++) { 1821 gps = rfp->n_launders_hist[j].launder_gp_seq; 1822 pr_cont(" %ds/%d: %ld:%ld", 1823 j + 1, FWD_CBS_HIST_DIV, 1824 rfp->n_launders_hist[j].n_launders, 1825 rcutorture_seq_diff(gps, gps_old)); 1826 gps_old = gps; 1827 } 1828 pr_cont("\n"); 1829 } 1830 1831 /* Callback function for continuous-flood RCU callbacks. */ 1832 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 1833 { 1834 unsigned long flags; 1835 int i; 1836 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 1837 struct rcu_fwd_cb **rfcpp; 1838 struct rcu_fwd *rfp = rfcp->rfc_rfp; 1839 1840 rfcp->rfc_next = NULL; 1841 rfcp->rfc_gps++; 1842 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1843 rfcpp = rfp->rcu_fwd_cb_tail; 1844 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 1845 WRITE_ONCE(*rfcpp, rfcp); 1846 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 1847 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 1848 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 1849 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 1850 rfp->n_launders_hist[i].n_launders++; 1851 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 1852 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1853 } 1854 1855 // Give the scheduler a chance, even on nohz_full CPUs. 1856 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 1857 { 1858 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 1859 // Real call_rcu() floods hit userspace, so emulate that. 1860 if (need_resched() || (iter & 0xfff)) 1861 schedule(); 1862 return; 1863 } 1864 // No userspace emulation: CB invocation throttles call_rcu() 1865 cond_resched(); 1866 } 1867 1868 /* 1869 * Free all callbacks on the rcu_fwd_cb_head list, either because the 1870 * test is over or because we hit an OOM event. 1871 */ 1872 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 1873 { 1874 unsigned long flags; 1875 unsigned long freed = 0; 1876 struct rcu_fwd_cb *rfcp; 1877 1878 for (;;) { 1879 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1880 rfcp = rfp->rcu_fwd_cb_head; 1881 if (!rfcp) { 1882 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1883 break; 1884 } 1885 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 1886 if (!rfp->rcu_fwd_cb_head) 1887 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 1888 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1889 kfree(rfcp); 1890 freed++; 1891 rcu_torture_fwd_prog_cond_resched(freed); 1892 if (tick_nohz_full_enabled()) { 1893 local_irq_save(flags); 1894 rcu_momentary_dyntick_idle(); 1895 local_irq_restore(flags); 1896 } 1897 } 1898 return freed; 1899 } 1900 1901 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1902 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 1903 int *tested, int *tested_tries) 1904 { 1905 unsigned long cver; 1906 unsigned long dur; 1907 struct fwd_cb_state fcs; 1908 unsigned long gps; 1909 int idx; 1910 int sd; 1911 int sd4; 1912 bool selfpropcb = false; 1913 unsigned long stopat; 1914 static DEFINE_TORTURE_RANDOM(trs); 1915 1916 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1917 init_rcu_head_on_stack(&fcs.rh); 1918 selfpropcb = true; 1919 } 1920 1921 /* Tight loop containing cond_resched(). */ 1922 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1923 cur_ops->sync(); /* Later readers see above write. */ 1924 if (selfpropcb) { 1925 WRITE_ONCE(fcs.stop, 0); 1926 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1927 } 1928 cver = READ_ONCE(rcu_torture_current_version); 1929 gps = cur_ops->get_gp_seq(); 1930 sd = cur_ops->stall_dur() + 1; 1931 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1932 dur = sd4 + torture_random(&trs) % (sd - sd4); 1933 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1934 stopat = rfp->rcu_fwd_startat + dur; 1935 while (time_before(jiffies, stopat) && 1936 !shutdown_time_arrived() && 1937 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1938 idx = cur_ops->readlock(); 1939 udelay(10); 1940 cur_ops->readunlock(idx); 1941 if (!fwd_progress_need_resched || need_resched()) 1942 cond_resched(); 1943 } 1944 (*tested_tries)++; 1945 if (!time_before(jiffies, stopat) && 1946 !shutdown_time_arrived() && 1947 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1948 (*tested)++; 1949 cver = READ_ONCE(rcu_torture_current_version) - cver; 1950 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1951 WARN_ON(!cver && gps < 2); 1952 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1953 } 1954 if (selfpropcb) { 1955 WRITE_ONCE(fcs.stop, 1); 1956 cur_ops->sync(); /* Wait for running CB to complete. */ 1957 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1958 } 1959 1960 if (selfpropcb) { 1961 WARN_ON(READ_ONCE(fcs.stop) != 2); 1962 destroy_rcu_head_on_stack(&fcs.rh); 1963 } 1964 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1965 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1966 } 1967 1968 /* Carry out call_rcu() forward-progress testing. */ 1969 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 1970 { 1971 unsigned long cver; 1972 unsigned long flags; 1973 unsigned long gps; 1974 int i; 1975 long n_launders; 1976 long n_launders_cb_snap; 1977 long n_launders_sa; 1978 long n_max_cbs; 1979 long n_max_gps; 1980 struct rcu_fwd_cb *rfcp; 1981 struct rcu_fwd_cb *rfcpn; 1982 unsigned long stopat; 1983 unsigned long stoppedat; 1984 1985 if (READ_ONCE(rcu_fwd_emergency_stop)) 1986 return; /* Get out of the way quickly, no GP wait! */ 1987 if (!cur_ops->call) 1988 return; /* Can't do call_rcu() fwd prog without ->call. */ 1989 1990 /* Loop continuously posting RCU callbacks. */ 1991 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1992 cur_ops->sync(); /* Later readers see above write. */ 1993 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1994 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 1995 n_launders = 0; 1996 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 1997 n_launders_sa = 0; 1998 n_max_cbs = 0; 1999 n_max_gps = 0; 2000 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2001 rfp->n_launders_hist[i].n_launders = 0; 2002 cver = READ_ONCE(rcu_torture_current_version); 2003 gps = cur_ops->get_gp_seq(); 2004 rfp->rcu_launder_gp_seq_start = gps; 2005 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2006 while (time_before(jiffies, stopat) && 2007 !shutdown_time_arrived() && 2008 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2009 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2010 rfcpn = NULL; 2011 if (rfcp) 2012 rfcpn = READ_ONCE(rfcp->rfc_next); 2013 if (rfcpn) { 2014 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2015 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2016 break; 2017 rfp->rcu_fwd_cb_head = rfcpn; 2018 n_launders++; 2019 n_launders_sa++; 2020 } else { 2021 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2022 if (WARN_ON_ONCE(!rfcp)) { 2023 schedule_timeout_interruptible(1); 2024 continue; 2025 } 2026 n_max_cbs++; 2027 n_launders_sa = 0; 2028 rfcp->rfc_gps = 0; 2029 rfcp->rfc_rfp = rfp; 2030 } 2031 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2032 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2033 if (tick_nohz_full_enabled()) { 2034 local_irq_save(flags); 2035 rcu_momentary_dyntick_idle(); 2036 local_irq_restore(flags); 2037 } 2038 } 2039 stoppedat = jiffies; 2040 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2041 cver = READ_ONCE(rcu_torture_current_version) - cver; 2042 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2043 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2044 (void)rcu_torture_fwd_prog_cbfree(rfp); 2045 2046 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2047 !shutdown_time_arrived()) { 2048 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2049 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2050 __func__, 2051 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2052 n_launders + n_max_cbs - n_launders_cb_snap, 2053 n_launders, n_launders_sa, 2054 n_max_gps, n_max_cbs, cver, gps); 2055 rcu_torture_fwd_cb_hist(rfp); 2056 } 2057 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2058 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2059 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2060 } 2061 2062 2063 /* 2064 * OOM notifier, but this only prints diagnostic information for the 2065 * current forward-progress test. 2066 */ 2067 static int rcutorture_oom_notify(struct notifier_block *self, 2068 unsigned long notused, void *nfreed) 2069 { 2070 struct rcu_fwd *rfp; 2071 2072 mutex_lock(&rcu_fwd_mutex); 2073 rfp = rcu_fwds; 2074 if (!rfp) { 2075 mutex_unlock(&rcu_fwd_mutex); 2076 return NOTIFY_OK; 2077 } 2078 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2079 __func__); 2080 rcu_torture_fwd_cb_hist(rfp); 2081 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2082 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2083 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2084 pr_info("%s: Freed %lu RCU callbacks.\n", 2085 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2086 rcu_barrier(); 2087 pr_info("%s: Freed %lu RCU callbacks.\n", 2088 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2089 rcu_barrier(); 2090 pr_info("%s: Freed %lu RCU callbacks.\n", 2091 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2092 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2093 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2094 pr_info("%s returning after OOM processing.\n", __func__); 2095 mutex_unlock(&rcu_fwd_mutex); 2096 return NOTIFY_OK; 2097 } 2098 2099 static struct notifier_block rcutorture_oom_nb = { 2100 .notifier_call = rcutorture_oom_notify 2101 }; 2102 2103 /* Carry out grace-period forward-progress testing. */ 2104 static int rcu_torture_fwd_prog(void *args) 2105 { 2106 struct rcu_fwd *rfp = args; 2107 int tested = 0; 2108 int tested_tries = 0; 2109 2110 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2111 rcu_bind_current_to_nocb(); 2112 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2113 set_user_nice(current, MAX_NICE); 2114 do { 2115 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2116 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2117 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2118 rcu_inkernel_boot_has_ended()) 2119 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2120 if (rcu_inkernel_boot_has_ended()) 2121 rcu_torture_fwd_prog_cr(rfp); 2122 2123 /* Avoid slow periods, better to test when busy. */ 2124 stutter_wait("rcu_torture_fwd_prog"); 2125 } while (!torture_must_stop()); 2126 /* Short runs might not contain a valid forward-progress attempt. */ 2127 WARN_ON(!tested && tested_tries >= 5); 2128 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2129 torture_kthread_stopping("rcu_torture_fwd_prog"); 2130 return 0; 2131 } 2132 2133 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2134 static int __init rcu_torture_fwd_prog_init(void) 2135 { 2136 struct rcu_fwd *rfp; 2137 2138 if (!fwd_progress) 2139 return 0; /* Not requested, so don't do it. */ 2140 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 2141 cur_ops == &rcu_busted_ops) { 2142 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2143 return 0; 2144 } 2145 if (stall_cpu > 0) { 2146 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2147 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) 2148 return -EINVAL; /* In module, can fail back to user. */ 2149 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 2150 return 0; 2151 } 2152 if (fwd_progress_holdoff <= 0) 2153 fwd_progress_holdoff = 1; 2154 if (fwd_progress_div <= 0) 2155 fwd_progress_div = 4; 2156 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2157 if (!rfp) 2158 return -ENOMEM; 2159 spin_lock_init(&rfp->rcu_fwd_lock); 2160 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2161 mutex_lock(&rcu_fwd_mutex); 2162 rcu_fwds = rfp; 2163 mutex_unlock(&rcu_fwd_mutex); 2164 register_oom_notifier(&rcutorture_oom_nb); 2165 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2166 } 2167 2168 static void rcu_torture_fwd_prog_cleanup(void) 2169 { 2170 struct rcu_fwd *rfp; 2171 2172 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2173 rfp = rcu_fwds; 2174 mutex_lock(&rcu_fwd_mutex); 2175 rcu_fwds = NULL; 2176 mutex_unlock(&rcu_fwd_mutex); 2177 unregister_oom_notifier(&rcutorture_oom_nb); 2178 kfree(rfp); 2179 } 2180 2181 /* Callback function for RCU barrier testing. */ 2182 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2183 { 2184 atomic_inc(&barrier_cbs_invoked); 2185 } 2186 2187 /* IPI handler to get callback posted on desired CPU, if online. */ 2188 static void rcu_torture_barrier1cb(void *rcu_void) 2189 { 2190 struct rcu_head *rhp = rcu_void; 2191 2192 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2193 } 2194 2195 /* kthread function to register callbacks used to test RCU barriers. */ 2196 static int rcu_torture_barrier_cbs(void *arg) 2197 { 2198 long myid = (long)arg; 2199 bool lastphase = false; 2200 bool newphase; 2201 struct rcu_head rcu; 2202 2203 init_rcu_head_on_stack(&rcu); 2204 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2205 set_user_nice(current, MAX_NICE); 2206 do { 2207 wait_event(barrier_cbs_wq[myid], 2208 (newphase = 2209 smp_load_acquire(&barrier_phase)) != lastphase || 2210 torture_must_stop()); 2211 lastphase = newphase; 2212 if (torture_must_stop()) 2213 break; 2214 /* 2215 * The above smp_load_acquire() ensures barrier_phase load 2216 * is ordered before the following ->call(). 2217 */ 2218 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2219 &rcu, 1)) { 2220 // IPI failed, so use direct call from current CPU. 2221 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2222 } 2223 if (atomic_dec_and_test(&barrier_cbs_count)) 2224 wake_up(&barrier_wq); 2225 } while (!torture_must_stop()); 2226 if (cur_ops->cb_barrier != NULL) 2227 cur_ops->cb_barrier(); 2228 destroy_rcu_head_on_stack(&rcu); 2229 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2230 return 0; 2231 } 2232 2233 /* kthread function to drive and coordinate RCU barrier testing. */ 2234 static int rcu_torture_barrier(void *arg) 2235 { 2236 int i; 2237 2238 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2239 do { 2240 atomic_set(&barrier_cbs_invoked, 0); 2241 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2242 /* Ensure barrier_phase ordered after prior assignments. */ 2243 smp_store_release(&barrier_phase, !barrier_phase); 2244 for (i = 0; i < n_barrier_cbs; i++) 2245 wake_up(&barrier_cbs_wq[i]); 2246 wait_event(barrier_wq, 2247 atomic_read(&barrier_cbs_count) == 0 || 2248 torture_must_stop()); 2249 if (torture_must_stop()) 2250 break; 2251 n_barrier_attempts++; 2252 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2253 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2254 n_rcu_torture_barrier_error++; 2255 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2256 atomic_read(&barrier_cbs_invoked), 2257 n_barrier_cbs); 2258 WARN_ON(1); 2259 // Wait manually for the remaining callbacks 2260 i = 0; 2261 do { 2262 if (WARN_ON(i++ > HZ)) 2263 i = INT_MIN; 2264 schedule_timeout_interruptible(1); 2265 cur_ops->cb_barrier(); 2266 } while (atomic_read(&barrier_cbs_invoked) != 2267 n_barrier_cbs && 2268 !torture_must_stop()); 2269 smp_mb(); // Can't trust ordering if broken. 2270 if (!torture_must_stop()) 2271 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2272 atomic_read(&barrier_cbs_invoked)); 2273 } else { 2274 n_barrier_successes++; 2275 } 2276 schedule_timeout_interruptible(HZ / 10); 2277 } while (!torture_must_stop()); 2278 torture_kthread_stopping("rcu_torture_barrier"); 2279 return 0; 2280 } 2281 2282 /* Initialize RCU barrier testing. */ 2283 static int rcu_torture_barrier_init(void) 2284 { 2285 int i; 2286 int ret; 2287 2288 if (n_barrier_cbs <= 0) 2289 return 0; 2290 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2291 pr_alert("%s" TORTURE_FLAG 2292 " Call or barrier ops missing for %s,\n", 2293 torture_type, cur_ops->name); 2294 pr_alert("%s" TORTURE_FLAG 2295 " RCU barrier testing omitted from run.\n", 2296 torture_type); 2297 return 0; 2298 } 2299 atomic_set(&barrier_cbs_count, 0); 2300 atomic_set(&barrier_cbs_invoked, 0); 2301 barrier_cbs_tasks = 2302 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2303 GFP_KERNEL); 2304 barrier_cbs_wq = 2305 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2306 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2307 return -ENOMEM; 2308 for (i = 0; i < n_barrier_cbs; i++) { 2309 init_waitqueue_head(&barrier_cbs_wq[i]); 2310 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2311 (void *)(long)i, 2312 barrier_cbs_tasks[i]); 2313 if (ret) 2314 return ret; 2315 } 2316 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2317 } 2318 2319 /* Clean up after RCU barrier testing. */ 2320 static void rcu_torture_barrier_cleanup(void) 2321 { 2322 int i; 2323 2324 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2325 if (barrier_cbs_tasks != NULL) { 2326 for (i = 0; i < n_barrier_cbs; i++) 2327 torture_stop_kthread(rcu_torture_barrier_cbs, 2328 barrier_cbs_tasks[i]); 2329 kfree(barrier_cbs_tasks); 2330 barrier_cbs_tasks = NULL; 2331 } 2332 if (barrier_cbs_wq != NULL) { 2333 kfree(barrier_cbs_wq); 2334 barrier_cbs_wq = NULL; 2335 } 2336 } 2337 2338 static bool rcu_torture_can_boost(void) 2339 { 2340 static int boost_warn_once; 2341 int prio; 2342 2343 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2344 return false; 2345 2346 prio = rcu_get_gp_kthreads_prio(); 2347 if (!prio) 2348 return false; 2349 2350 if (prio < 2) { 2351 if (boost_warn_once == 1) 2352 return false; 2353 2354 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2355 boost_warn_once = 1; 2356 return false; 2357 } 2358 2359 return true; 2360 } 2361 2362 static bool read_exit_child_stop; 2363 static bool read_exit_child_stopped; 2364 static wait_queue_head_t read_exit_wq; 2365 2366 // Child kthread which just does an rcutorture reader and exits. 2367 static int rcu_torture_read_exit_child(void *trsp_in) 2368 { 2369 struct torture_random_state *trsp = trsp_in; 2370 2371 set_user_nice(current, MAX_NICE); 2372 // Minimize time between reading and exiting. 2373 while (!kthread_should_stop()) 2374 schedule_timeout_uninterruptible(1); 2375 (void)rcu_torture_one_read(trsp); 2376 return 0; 2377 } 2378 2379 // Parent kthread which creates and destroys read-exit child kthreads. 2380 static int rcu_torture_read_exit(void *unused) 2381 { 2382 int count = 0; 2383 bool errexit = false; 2384 int i; 2385 struct task_struct *tsp; 2386 DEFINE_TORTURE_RANDOM(trs); 2387 2388 // Allocate and initialize. 2389 set_user_nice(current, MAX_NICE); 2390 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 2391 2392 // Each pass through this loop does one read-exit episode. 2393 do { 2394 if (++count > read_exit_burst) { 2395 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 2396 rcu_barrier(); // Wait for task_struct free, avoid OOM. 2397 for (i = 0; i < read_exit_delay; i++) { 2398 schedule_timeout_uninterruptible(HZ); 2399 if (READ_ONCE(read_exit_child_stop)) 2400 break; 2401 } 2402 if (!READ_ONCE(read_exit_child_stop)) 2403 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 2404 count = 0; 2405 } 2406 if (READ_ONCE(read_exit_child_stop)) 2407 break; 2408 // Spawn child. 2409 tsp = kthread_run(rcu_torture_read_exit_child, 2410 &trs, "%s", 2411 "rcu_torture_read_exit_child"); 2412 if (IS_ERR(tsp)) { 2413 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2414 errexit = true; 2415 tsp = NULL; 2416 break; 2417 } 2418 cond_resched(); 2419 kthread_stop(tsp); 2420 n_read_exits ++; 2421 stutter_wait("rcu_torture_read_exit"); 2422 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 2423 2424 // Clean up and exit. 2425 smp_store_release(&read_exit_child_stopped, true); // After reaping. 2426 smp_mb(); // Store before wakeup. 2427 wake_up(&read_exit_wq); 2428 while (!torture_must_stop()) 2429 schedule_timeout_uninterruptible(1); 2430 torture_kthread_stopping("rcu_torture_read_exit"); 2431 return 0; 2432 } 2433 2434 static int rcu_torture_read_exit_init(void) 2435 { 2436 if (read_exit_burst <= 0) 2437 return -EINVAL; 2438 init_waitqueue_head(&read_exit_wq); 2439 read_exit_child_stop = false; 2440 read_exit_child_stopped = false; 2441 return torture_create_kthread(rcu_torture_read_exit, NULL, 2442 read_exit_task); 2443 } 2444 2445 static void rcu_torture_read_exit_cleanup(void) 2446 { 2447 if (!read_exit_task) 2448 return; 2449 WRITE_ONCE(read_exit_child_stop, true); 2450 smp_mb(); // Above write before wait. 2451 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 2452 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 2453 } 2454 2455 static enum cpuhp_state rcutor_hp; 2456 2457 static void 2458 rcu_torture_cleanup(void) 2459 { 2460 int firsttime; 2461 int flags = 0; 2462 unsigned long gp_seq = 0; 2463 int i; 2464 2465 if (torture_cleanup_begin()) { 2466 if (cur_ops->cb_barrier != NULL) 2467 cur_ops->cb_barrier(); 2468 return; 2469 } 2470 if (!cur_ops) { 2471 torture_cleanup_end(); 2472 return; 2473 } 2474 2475 show_rcu_gp_kthreads(); 2476 rcu_torture_read_exit_cleanup(); 2477 rcu_torture_barrier_cleanup(); 2478 rcu_torture_fwd_prog_cleanup(); 2479 torture_stop_kthread(rcu_torture_stall, stall_task); 2480 torture_stop_kthread(rcu_torture_writer, writer_task); 2481 2482 if (reader_tasks) { 2483 for (i = 0; i < nrealreaders; i++) 2484 torture_stop_kthread(rcu_torture_reader, 2485 reader_tasks[i]); 2486 kfree(reader_tasks); 2487 } 2488 2489 if (fakewriter_tasks) { 2490 for (i = 0; i < nfakewriters; i++) { 2491 torture_stop_kthread(rcu_torture_fakewriter, 2492 fakewriter_tasks[i]); 2493 } 2494 kfree(fakewriter_tasks); 2495 fakewriter_tasks = NULL; 2496 } 2497 2498 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2499 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2500 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 2501 cur_ops->name, (long)gp_seq, flags, 2502 rcutorture_seq_diff(gp_seq, start_gp_seq)); 2503 torture_stop_kthread(rcu_torture_stats, stats_task); 2504 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2505 if (rcu_torture_can_boost()) 2506 cpuhp_remove_state(rcutor_hp); 2507 2508 /* 2509 * Wait for all RCU callbacks to fire, then do torture-type-specific 2510 * cleanup operations. 2511 */ 2512 if (cur_ops->cb_barrier != NULL) 2513 cur_ops->cb_barrier(); 2514 if (cur_ops->cleanup != NULL) 2515 cur_ops->cleanup(); 2516 2517 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2518 2519 if (err_segs_recorded) { 2520 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2521 if (rt_read_nsegs == 0) 2522 pr_alert("\t: No segments recorded!!!\n"); 2523 firsttime = 1; 2524 for (i = 0; i < rt_read_nsegs; i++) { 2525 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2526 if (err_segs[i].rt_delay_jiffies != 0) { 2527 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2528 err_segs[i].rt_delay_jiffies); 2529 firsttime = 0; 2530 } 2531 if (err_segs[i].rt_delay_ms != 0) { 2532 pr_cont("%s%ldms", firsttime ? "" : "+", 2533 err_segs[i].rt_delay_ms); 2534 firsttime = 0; 2535 } 2536 if (err_segs[i].rt_delay_us != 0) { 2537 pr_cont("%s%ldus", firsttime ? "" : "+", 2538 err_segs[i].rt_delay_us); 2539 firsttime = 0; 2540 } 2541 pr_cont("%s\n", 2542 err_segs[i].rt_preempted ? "preempted" : ""); 2543 2544 } 2545 } 2546 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2547 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2548 else if (torture_onoff_failures()) 2549 rcu_torture_print_module_parms(cur_ops, 2550 "End of test: RCU_HOTPLUG"); 2551 else 2552 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2553 torture_cleanup_end(); 2554 } 2555 2556 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2557 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2558 { 2559 } 2560 2561 static void rcu_torture_err_cb(struct rcu_head *rhp) 2562 { 2563 /* 2564 * This -might- happen due to race conditions, but is unlikely. 2565 * The scenario that leads to this happening is that the 2566 * first of the pair of duplicate callbacks is queued, 2567 * someone else starts a grace period that includes that 2568 * callback, then the second of the pair must wait for the 2569 * next grace period. Unlikely, but can happen. If it 2570 * does happen, the debug-objects subsystem won't have splatted. 2571 */ 2572 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2573 } 2574 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2575 2576 /* 2577 * Verify that double-free causes debug-objects to complain, but only 2578 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2579 * cannot be carried out. 2580 */ 2581 static void rcu_test_debug_objects(void) 2582 { 2583 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2584 struct rcu_head rh1; 2585 struct rcu_head rh2; 2586 2587 init_rcu_head_on_stack(&rh1); 2588 init_rcu_head_on_stack(&rh2); 2589 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2590 2591 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2592 preempt_disable(); /* Prevent preemption from interrupting test. */ 2593 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2594 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2595 local_irq_disable(); /* Make it harder to start a new grace period. */ 2596 call_rcu(&rh2, rcu_torture_leak_cb); 2597 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2598 local_irq_enable(); 2599 rcu_read_unlock(); 2600 preempt_enable(); 2601 2602 /* Wait for them all to get done so we can safely return. */ 2603 rcu_barrier(); 2604 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2605 destroy_rcu_head_on_stack(&rh1); 2606 destroy_rcu_head_on_stack(&rh2); 2607 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2608 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2609 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2610 } 2611 2612 static void rcutorture_sync(void) 2613 { 2614 static unsigned long n; 2615 2616 if (cur_ops->sync && !(++n & 0xfff)) 2617 cur_ops->sync(); 2618 } 2619 2620 static int __init 2621 rcu_torture_init(void) 2622 { 2623 long i; 2624 int cpu; 2625 int firsterr = 0; 2626 int flags = 0; 2627 unsigned long gp_seq = 0; 2628 static struct rcu_torture_ops *torture_ops[] = { 2629 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2630 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, 2631 &tasks_tracing_ops, &trivial_ops, 2632 }; 2633 2634 if (!torture_init_begin(torture_type, verbose)) 2635 return -EBUSY; 2636 2637 /* Process args and tell the world that the torturer is on the job. */ 2638 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2639 cur_ops = torture_ops[i]; 2640 if (strcmp(torture_type, cur_ops->name) == 0) 2641 break; 2642 } 2643 if (i == ARRAY_SIZE(torture_ops)) { 2644 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2645 torture_type); 2646 pr_alert("rcu-torture types:"); 2647 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2648 pr_cont(" %s", torture_ops[i]->name); 2649 pr_cont("\n"); 2650 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2651 firsterr = -EINVAL; 2652 cur_ops = NULL; 2653 goto unwind; 2654 } 2655 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2656 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2657 fqs_duration = 0; 2658 } 2659 if (cur_ops->init) 2660 cur_ops->init(); 2661 2662 if (nreaders >= 0) { 2663 nrealreaders = nreaders; 2664 } else { 2665 nrealreaders = num_online_cpus() - 2 - nreaders; 2666 if (nrealreaders <= 0) 2667 nrealreaders = 1; 2668 } 2669 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2670 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2671 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2672 start_gp_seq = gp_seq; 2673 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 2674 cur_ops->name, (long)gp_seq, flags); 2675 2676 /* Set up the freelist. */ 2677 2678 INIT_LIST_HEAD(&rcu_torture_freelist); 2679 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2680 rcu_tortures[i].rtort_mbtest = 0; 2681 list_add_tail(&rcu_tortures[i].rtort_free, 2682 &rcu_torture_freelist); 2683 } 2684 2685 /* Initialize the statistics so that each run gets its own numbers. */ 2686 2687 rcu_torture_current = NULL; 2688 rcu_torture_current_version = 0; 2689 atomic_set(&n_rcu_torture_alloc, 0); 2690 atomic_set(&n_rcu_torture_alloc_fail, 0); 2691 atomic_set(&n_rcu_torture_free, 0); 2692 atomic_set(&n_rcu_torture_mberror, 0); 2693 atomic_set(&n_rcu_torture_error, 0); 2694 n_rcu_torture_barrier_error = 0; 2695 n_rcu_torture_boost_ktrerror = 0; 2696 n_rcu_torture_boost_rterror = 0; 2697 n_rcu_torture_boost_failure = 0; 2698 n_rcu_torture_boosts = 0; 2699 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2700 atomic_set(&rcu_torture_wcount[i], 0); 2701 for_each_possible_cpu(cpu) { 2702 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2703 per_cpu(rcu_torture_count, cpu)[i] = 0; 2704 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2705 } 2706 } 2707 err_segs_recorded = 0; 2708 rt_read_nsegs = 0; 2709 2710 /* Start up the kthreads. */ 2711 2712 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2713 writer_task); 2714 if (firsterr) 2715 goto unwind; 2716 if (nfakewriters > 0) { 2717 fakewriter_tasks = kcalloc(nfakewriters, 2718 sizeof(fakewriter_tasks[0]), 2719 GFP_KERNEL); 2720 if (fakewriter_tasks == NULL) { 2721 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2722 firsterr = -ENOMEM; 2723 goto unwind; 2724 } 2725 } 2726 for (i = 0; i < nfakewriters; i++) { 2727 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2728 NULL, fakewriter_tasks[i]); 2729 if (firsterr) 2730 goto unwind; 2731 } 2732 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2733 GFP_KERNEL); 2734 if (reader_tasks == NULL) { 2735 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2736 firsterr = -ENOMEM; 2737 goto unwind; 2738 } 2739 for (i = 0; i < nrealreaders; i++) { 2740 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2741 reader_tasks[i]); 2742 if (firsterr) 2743 goto unwind; 2744 } 2745 if (stat_interval > 0) { 2746 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2747 stats_task); 2748 if (firsterr) 2749 goto unwind; 2750 } 2751 if (test_no_idle_hz && shuffle_interval > 0) { 2752 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2753 if (firsterr) 2754 goto unwind; 2755 } 2756 if (stutter < 0) 2757 stutter = 0; 2758 if (stutter) { 2759 int t; 2760 2761 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2762 firsterr = torture_stutter_init(stutter * HZ, t); 2763 if (firsterr) 2764 goto unwind; 2765 } 2766 if (fqs_duration < 0) 2767 fqs_duration = 0; 2768 if (fqs_duration) { 2769 /* Create the fqs thread */ 2770 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2771 fqs_task); 2772 if (firsterr) 2773 goto unwind; 2774 } 2775 if (test_boost_interval < 1) 2776 test_boost_interval = 1; 2777 if (test_boost_duration < 2) 2778 test_boost_duration = 2; 2779 if (rcu_torture_can_boost()) { 2780 2781 boost_starttime = jiffies + test_boost_interval * HZ; 2782 2783 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2784 rcutorture_booster_init, 2785 rcutorture_booster_cleanup); 2786 if (firsterr < 0) 2787 goto unwind; 2788 rcutor_hp = firsterr; 2789 } 2790 shutdown_jiffies = jiffies + shutdown_secs * HZ; 2791 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2792 if (firsterr) 2793 goto unwind; 2794 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2795 rcutorture_sync); 2796 if (firsterr) 2797 goto unwind; 2798 firsterr = rcu_torture_stall_init(); 2799 if (firsterr) 2800 goto unwind; 2801 firsterr = rcu_torture_fwd_prog_init(); 2802 if (firsterr) 2803 goto unwind; 2804 firsterr = rcu_torture_barrier_init(); 2805 if (firsterr) 2806 goto unwind; 2807 firsterr = rcu_torture_read_exit_init(); 2808 if (firsterr) 2809 goto unwind; 2810 if (object_debug) 2811 rcu_test_debug_objects(); 2812 torture_init_end(); 2813 return 0; 2814 2815 unwind: 2816 torture_init_end(); 2817 rcu_torture_cleanup(); 2818 return firsterr; 2819 } 2820 2821 module_init(rcu_torture_init); 2822 module_exit(rcu_torture_cleanup); 2823