// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Fallbacks for kernels whose headers do not provide these KCSAN annotations. */
#ifndef data_race
#define data_race(expr)						\
	({							\
		expr;						\
	})
#endif
#ifndef ASSERT_EXCLUSIVE_WRITER
#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
#endif
#ifndef ASSERT_EXCLUSIVE_ACCESS
#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
#endif

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/* and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

/* Used by torture types that do not track grace-period sequence numbers. */
static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype = RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.deferred_free = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.get_state = get_state_synchronize_rcu,
	.cond_sync = cond_synchronize_rcu,
	.call = call_rcu,
	.cb_barrier = rcu_barrier,
	.fqs = rcu_force_quiescent_state,
	.stats = NULL,
	.stall_dur = rcu_jiffies_till_stall_check,
	.irq_capable = 1,
	.can_boost = rcu_can_boost(),
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype = INVALID_RCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_busted_torture_deferred_free,
	.sync = synchronize_rcu_busted,
	.exp_sync = synchronize_rcu_busted,
	.call = call_rcu_busted,
	.cb_barrier = NULL,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype = SRCU_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.name = "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype = SRCU_FLAVOR,
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.read_delay = rcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.get_gp_seq = srcu_torture_completed,
	.deferred_free = srcu_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.exp_sync = srcu_torture_synchronize_expedited,
	.call = srcu_torture_call,
	.cb_barrier = srcu_torture_barrier,
	.stats = srcu_torture_stats,
	.irq_capable = 1,
	.extendables = RCUTORTURE_MAX_EXTEND,
	.name = "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_torture_read_lock,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = tasks_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_torture_deferred_free,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_mult_test,
	.call = call_rcu_tasks,
	.cb_barrier = rcu_barrier_tasks,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype = RCU_TRIVIAL_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.sync = synchronize_rcu_trivial,
	.exp_sync = synchronize_rcu_trivial,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = rcu_torture_read_lock_trivial,
	.read_delay = rcu_read_delay, /* just reuse rcu's version. */
	.readunlock = rcu_torture_read_unlock_trivial,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_rude_torture_deferred_free,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.call = call_rcu_tasks_rude,
	.cb_barrier = rcu_barrier_tasks_rude,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.name = "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype = RCU_TASKS_TRACING_FLAVOR,
	.init = rcu_sync_torture_init,
	.readlock = tasks_tracing_torture_read_lock,
	.read_delay = srcu_read_delay, /* just reuse srcu's version. */
	.readunlock = tasks_tracing_torture_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.deferred_free = rcu_tasks_tracing_torture_deferred_free,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.call = call_rcu_tasks_trace,
	.cb_barrier = rcu_barrier_tasks_trace,
	.fqs = NULL,
	.stats = NULL,
	.irq_capable = 1,
	.slow_gps = 1,
	.name = "tasks-tracing"
};

/* Grace-period sequence difference, falling back to simple subtraction. */
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in, otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed during this test interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1;
		 * in that case the boost check would never happen in the
		 * above loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer") &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    rcu_inkernel_boot_has_ended())
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

/* Free an rcu_head allocated by the timer-handler call_rcu() test. */
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  rcu_read_lock_trace_held() ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));
	pr_cont("read-exits: %ld\n", data_race(n_read_exits));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
		WARN_ON_ONCE(i > 1); // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d "
		 "read_exit_delay=%d read_exit_burst=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff,
		 read_exit_delay, read_exit_burst);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
It waits as specified by stall_cpu_holdoff, then 1707 * induces a CPU stall for the time specified by stall_cpu. 1708 */ 1709 static int rcu_torture_stall(void *args) 1710 { 1711 int idx; 1712 unsigned long stop_at; 1713 1714 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1715 if (stall_cpu_holdoff > 0) { 1716 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1717 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1718 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1719 } 1720 if (!kthread_should_stop() && stall_gp_kthread > 0) { 1721 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 1722 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 1723 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 1724 if (kthread_should_stop()) 1725 break; 1726 schedule_timeout_uninterruptible(HZ); 1727 } 1728 } 1729 if (!kthread_should_stop() && stall_cpu > 0) { 1730 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 1731 stop_at = ktime_get_seconds() + stall_cpu; 1732 /* RCU CPU stall is expected behavior in following code. */ 1733 idx = cur_ops->readlock(); 1734 if (stall_cpu_irqsoff) 1735 local_irq_disable(); 1736 else if (!stall_cpu_block) 1737 preempt_disable(); 1738 pr_alert("rcu_torture_stall start on CPU %d.\n", 1739 raw_smp_processor_id()); 1740 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 1741 stop_at)) 1742 if (stall_cpu_block) 1743 schedule_timeout_uninterruptible(HZ); 1744 if (stall_cpu_irqsoff) 1745 local_irq_enable(); 1746 else if (!stall_cpu_block) 1747 preempt_enable(); 1748 cur_ops->readunlock(idx); 1749 } 1750 pr_alert("rcu_torture_stall end.\n"); 1751 torture_shutdown_absorb("rcu_torture_stall"); 1752 while (!kthread_should_stop()) 1753 schedule_timeout_interruptible(10 * HZ); 1754 return 0; 1755 } 1756 1757 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1758 static int __init rcu_torture_stall_init(void) 1759 { 1760 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 1761 return 0; 1762 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 1763 } 1764 1765 /* State structure for forward-progress self-propagating RCU callback. */ 1766 struct fwd_cb_state { 1767 struct rcu_head rh; 1768 int stop; 1769 }; 1770 1771 /* 1772 * Forward-progress self-propagating RCU callback function. Because 1773 * callbacks run from softirq, this function is an implicit RCU read-side 1774 * critical section. 1775 */ 1776 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 1777 { 1778 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 1779 1780 if (READ_ONCE(fcsp->stop)) { 1781 WRITE_ONCE(fcsp->stop, 2); 1782 return; 1783 } 1784 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 1785 } 1786 1787 /* State for continuous-flood RCU callbacks. */ 1788 struct rcu_fwd_cb { 1789 struct rcu_head rh; 1790 struct rcu_fwd_cb *rfc_next; 1791 struct rcu_fwd *rfc_rfp; 1792 int rfc_gps; 1793 }; 1794 1795 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 1796 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1797 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1798 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 1799 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 1800 1801 struct rcu_launder_hist { 1802 long n_launders; 1803 unsigned long launder_gp_seq; 1804 }; 1805 1806 struct rcu_fwd { 1807 spinlock_t rcu_fwd_lock; 1808 struct rcu_fwd_cb *rcu_fwd_cb_head; 1809 struct rcu_fwd_cb **rcu_fwd_cb_tail; 1810 long n_launders_cb; 1811 unsigned long rcu_fwd_startat; 1812 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 1813 unsigned long rcu_launder_gp_seq_start; 1814 }; 1815 1816 static struct rcu_fwd *rcu_fwds; 1817 static bool rcu_fwd_emergency_stop; 1818 1819 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 1820 { 1821 unsigned long gps; 1822 unsigned long gps_old; 1823 int i; 1824 int j; 1825 1826 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 1827 if (rfp->n_launders_hist[i].n_launders > 0) 1828 break; 1829 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 1830 __func__, jiffies - rfp->rcu_fwd_startat); 1831 gps_old = rfp->rcu_launder_gp_seq_start; 1832 for (j = 0; j <= i; j++) { 1833 gps = rfp->n_launders_hist[j].launder_gp_seq; 1834 pr_cont(" %ds/%d: %ld:%ld", 1835 j + 1, FWD_CBS_HIST_DIV, 1836 rfp->n_launders_hist[j].n_launders, 1837 rcutorture_seq_diff(gps, gps_old)); 1838 gps_old = gps; 1839 } 1840 pr_cont("\n"); 1841 } 1842 1843 /* Callback function for continuous-flood RCU callbacks. */ 1844 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 1845 { 1846 unsigned long flags; 1847 int i; 1848 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 1849 struct rcu_fwd_cb **rfcpp; 1850 struct rcu_fwd *rfp = rfcp->rfc_rfp; 1851 1852 rfcp->rfc_next = NULL; 1853 rfcp->rfc_gps++; 1854 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1855 rfcpp = rfp->rcu_fwd_cb_tail; 1856 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 1857 WRITE_ONCE(*rfcpp, rfcp); 1858 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 1859 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 1860 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 1861 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 1862 rfp->n_launders_hist[i].n_launders++; 1863 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 1864 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1865 } 1866 1867 // Give the scheduler a chance, even on nohz_full CPUs. 1868 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 1869 { 1870 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 1871 // Real call_rcu() floods hit userspace, so emulate that. 1872 if (need_resched() || (iter & 0xfff)) 1873 schedule(); 1874 return; 1875 } 1876 // No userspace emulation: CB invocation throttles call_rcu() 1877 cond_resched(); 1878 } 1879 1880 /* 1881 * Free all callbacks on the rcu_fwd_cb_head list, either because the 1882 * test is over or because we hit an OOM event. 
1883 */ 1884 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 1885 { 1886 unsigned long flags; 1887 unsigned long freed = 0; 1888 struct rcu_fwd_cb *rfcp; 1889 1890 for (;;) { 1891 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1892 rfcp = rfp->rcu_fwd_cb_head; 1893 if (!rfcp) { 1894 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1895 break; 1896 } 1897 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 1898 if (!rfp->rcu_fwd_cb_head) 1899 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 1900 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1901 kfree(rfcp); 1902 freed++; 1903 rcu_torture_fwd_prog_cond_resched(freed); 1904 if (tick_nohz_full_enabled()) { 1905 local_irq_save(flags); 1906 rcu_momentary_dyntick_idle(); 1907 local_irq_restore(flags); 1908 } 1909 } 1910 return freed; 1911 } 1912 1913 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1914 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 1915 int *tested, int *tested_tries) 1916 { 1917 unsigned long cver; 1918 unsigned long dur; 1919 struct fwd_cb_state fcs; 1920 unsigned long gps; 1921 int idx; 1922 int sd; 1923 int sd4; 1924 bool selfpropcb = false; 1925 unsigned long stopat; 1926 static DEFINE_TORTURE_RANDOM(trs); 1927 1928 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1929 init_rcu_head_on_stack(&fcs.rh); 1930 selfpropcb = true; 1931 } 1932 1933 /* Tight loop containing cond_resched(). */ 1934 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1935 cur_ops->sync(); /* Later readers see above write. */ 1936 if (selfpropcb) { 1937 WRITE_ONCE(fcs.stop, 0); 1938 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1939 } 1940 cver = READ_ONCE(rcu_torture_current_version); 1941 gps = cur_ops->get_gp_seq(); 1942 sd = cur_ops->stall_dur() + 1; 1943 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1944 dur = sd4 + torture_random(&trs) % (sd - sd4); 1945 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1946 stopat = rfp->rcu_fwd_startat + dur; 1947 while (time_before(jiffies, stopat) && 1948 !shutdown_time_arrived() && 1949 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1950 idx = cur_ops->readlock(); 1951 udelay(10); 1952 cur_ops->readunlock(idx); 1953 if (!fwd_progress_need_resched || need_resched()) 1954 cond_resched(); 1955 } 1956 (*tested_tries)++; 1957 if (!time_before(jiffies, stopat) && 1958 !shutdown_time_arrived() && 1959 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1960 (*tested)++; 1961 cver = READ_ONCE(rcu_torture_current_version) - cver; 1962 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1963 WARN_ON(!cver && gps < 2); 1964 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1965 } 1966 if (selfpropcb) { 1967 WRITE_ONCE(fcs.stop, 1); 1968 cur_ops->sync(); /* Wait for running CB to complete. */ 1969 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1970 } 1971 1972 if (selfpropcb) { 1973 WARN_ON(READ_ONCE(fcs.stop) != 2); 1974 destroy_rcu_head_on_stack(&fcs.rh); 1975 } 1976 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1977 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1978 } 1979 1980 /* Carry out call_rcu() forward-progress testing. 
*/ 1981 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 1982 { 1983 unsigned long cver; 1984 unsigned long flags; 1985 unsigned long gps; 1986 int i; 1987 long n_launders; 1988 long n_launders_cb_snap; 1989 long n_launders_sa; 1990 long n_max_cbs; 1991 long n_max_gps; 1992 struct rcu_fwd_cb *rfcp; 1993 struct rcu_fwd_cb *rfcpn; 1994 unsigned long stopat; 1995 unsigned long stoppedat; 1996 1997 if (READ_ONCE(rcu_fwd_emergency_stop)) 1998 return; /* Get out of the way quickly, no GP wait! */ 1999 if (!cur_ops->call) 2000 return; /* Can't do call_rcu() fwd prog without ->call. */ 2001 2002 /* Loop continuously posting RCU callbacks. */ 2003 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 2004 cur_ops->sync(); /* Later readers see above write. */ 2005 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2006 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2007 n_launders = 0; 2008 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2009 n_launders_sa = 0; 2010 n_max_cbs = 0; 2011 n_max_gps = 0; 2012 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2013 rfp->n_launders_hist[i].n_launders = 0; 2014 cver = READ_ONCE(rcu_torture_current_version); 2015 gps = cur_ops->get_gp_seq(); 2016 rfp->rcu_launder_gp_seq_start = gps; 2017 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2018 while (time_before(jiffies, stopat) && 2019 !shutdown_time_arrived() && 2020 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2021 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2022 rfcpn = NULL; 2023 if (rfcp) 2024 rfcpn = READ_ONCE(rfcp->rfc_next); 2025 if (rfcpn) { 2026 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2027 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2028 break; 2029 rfp->rcu_fwd_cb_head = rfcpn; 2030 n_launders++; 2031 n_launders_sa++; 2032 } else { 2033 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2034 if (WARN_ON_ONCE(!rfcp)) { 2035 schedule_timeout_interruptible(1); 2036 continue; 2037 } 2038 n_max_cbs++; 2039 n_launders_sa = 0; 2040 rfcp->rfc_gps = 0; 2041 rfcp->rfc_rfp = rfp; 2042 } 2043 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2044 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2045 if (tick_nohz_full_enabled()) { 2046 local_irq_save(flags); 2047 rcu_momentary_dyntick_idle(); 2048 local_irq_restore(flags); 2049 } 2050 } 2051 stoppedat = jiffies; 2052 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2053 cver = READ_ONCE(rcu_torture_current_version) - cver; 2054 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2055 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2056 (void)rcu_torture_fwd_prog_cbfree(rfp); 2057 2058 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2059 !shutdown_time_arrived()) { 2060 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2061 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2062 __func__, 2063 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2064 n_launders + n_max_cbs - n_launders_cb_snap, 2065 n_launders, n_launders_sa, 2066 n_max_gps, n_max_cbs, cver, gps); 2067 rcu_torture_fwd_cb_hist(rfp); 2068 } 2069 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2070 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2071 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2072 } 2073 2074 2075 /* 2076 * OOM notifier, but this only prints diagnostic information for the 2077 * current forward-progress test. 
2078 */ 2079 static int rcutorture_oom_notify(struct notifier_block *self, 2080 unsigned long notused, void *nfreed) 2081 { 2082 struct rcu_fwd *rfp = rcu_fwds; 2083 2084 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2085 __func__); 2086 rcu_torture_fwd_cb_hist(rfp); 2087 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2088 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2089 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2090 pr_info("%s: Freed %lu RCU callbacks.\n", 2091 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2092 rcu_barrier(); 2093 pr_info("%s: Freed %lu RCU callbacks.\n", 2094 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2095 rcu_barrier(); 2096 pr_info("%s: Freed %lu RCU callbacks.\n", 2097 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2098 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2099 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2100 pr_info("%s returning after OOM processing.\n", __func__); 2101 return NOTIFY_OK; 2102 } 2103 2104 static struct notifier_block rcutorture_oom_nb = { 2105 .notifier_call = rcutorture_oom_notify 2106 }; 2107 2108 /* Carry out grace-period forward-progress testing. */ 2109 static int rcu_torture_fwd_prog(void *args) 2110 { 2111 struct rcu_fwd *rfp = args; 2112 int tested = 0; 2113 int tested_tries = 0; 2114 2115 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2116 rcu_bind_current_to_nocb(); 2117 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2118 set_user_nice(current, MAX_NICE); 2119 do { 2120 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2121 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2122 register_oom_notifier(&rcutorture_oom_nb); 2123 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2124 rcu_inkernel_boot_has_ended()) 2125 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2126 if (rcu_inkernel_boot_has_ended()) 2127 rcu_torture_fwd_prog_cr(rfp); 2128 unregister_oom_notifier(&rcutorture_oom_nb); 2129 2130 /* Avoid slow periods, better to test when busy. */ 2131 stutter_wait("rcu_torture_fwd_prog"); 2132 } while (!torture_must_stop()); 2133 /* Short runs might not contain a valid forward-progress attempt. */ 2134 WARN_ON(!tested && tested_tries >= 5); 2135 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2136 torture_kthread_stopping("rcu_torture_fwd_prog"); 2137 return 0; 2138 } 2139 2140 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2141 static int __init rcu_torture_fwd_prog_init(void) 2142 { 2143 struct rcu_fwd *rfp; 2144 2145 if (!fwd_progress) 2146 return 0; /* Not requested, so don't do it. */ 2147 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 2148 cur_ops == &rcu_busted_ops) { 2149 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2150 return 0; 2151 } 2152 if (stall_cpu > 0) { 2153 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2154 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST)) 2155 return -EINVAL; /* In module, can fail back to user. */ 2156 WARN_ON(1); /* Make sure rcutorture notices conflict.
*/ 2157 return 0; 2158 } 2159 if (fwd_progress_holdoff <= 0) 2160 fwd_progress_holdoff = 1; 2161 if (fwd_progress_div <= 0) 2162 fwd_progress_div = 4; 2163 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2164 if (!rfp) 2165 return -ENOMEM; 2166 spin_lock_init(&rfp->rcu_fwd_lock); 2167 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2168 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2169 } 2170 2171 /* Callback function for RCU barrier testing. */ 2172 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2173 { 2174 atomic_inc(&barrier_cbs_invoked); 2175 } 2176 2177 /* IPI handler to get callback posted on desired CPU, if online. */ 2178 static void rcu_torture_barrier1cb(void *rcu_void) 2179 { 2180 struct rcu_head *rhp = rcu_void; 2181 2182 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2183 } 2184 2185 /* kthread function to register callbacks used to test RCU barriers. */ 2186 static int rcu_torture_barrier_cbs(void *arg) 2187 { 2188 long myid = (long)arg; 2189 bool lastphase = false; 2190 bool newphase; 2191 struct rcu_head rcu; 2192 2193 init_rcu_head_on_stack(&rcu); 2194 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2195 set_user_nice(current, MAX_NICE); 2196 do { 2197 wait_event(barrier_cbs_wq[myid], 2198 (newphase = 2199 smp_load_acquire(&barrier_phase)) != lastphase || 2200 torture_must_stop()); 2201 lastphase = newphase; 2202 if (torture_must_stop()) 2203 break; 2204 /* 2205 * The above smp_load_acquire() ensures barrier_phase load 2206 * is ordered before the following ->call(). 2207 */ 2208 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2209 &rcu, 1)) { 2210 // IPI failed, so use direct call from current CPU. 2211 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2212 } 2213 if (atomic_dec_and_test(&barrier_cbs_count)) 2214 wake_up(&barrier_wq); 2215 } while (!torture_must_stop()); 2216 if (cur_ops->cb_barrier != NULL) 2217 cur_ops->cb_barrier(); 2218 destroy_rcu_head_on_stack(&rcu); 2219 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2220 return 0; 2221 } 2222 2223 /* kthread function to drive and coordinate RCU barrier testing. */ 2224 static int rcu_torture_barrier(void *arg) 2225 { 2226 int i; 2227 2228 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2229 do { 2230 atomic_set(&barrier_cbs_invoked, 0); 2231 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2232 /* Ensure barrier_phase ordered after prior assignments. */ 2233 smp_store_release(&barrier_phase, !barrier_phase); 2234 for (i = 0; i < n_barrier_cbs; i++) 2235 wake_up(&barrier_cbs_wq[i]); 2236 wait_event(barrier_wq, 2237 atomic_read(&barrier_cbs_count) == 0 || 2238 torture_must_stop()); 2239 if (torture_must_stop()) 2240 break; 2241 n_barrier_attempts++; 2242 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2243 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2244 n_rcu_torture_barrier_error++; 2245 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2246 atomic_read(&barrier_cbs_invoked), 2247 n_barrier_cbs); 2248 WARN_ON(1); 2249 // Wait manually for the remaining callbacks 2250 i = 0; 2251 do { 2252 if (WARN_ON(i++ > HZ)) 2253 i = INT_MIN; 2254 schedule_timeout_interruptible(1); 2255 cur_ops->cb_barrier(); 2256 } while (atomic_read(&barrier_cbs_invoked) != 2257 n_barrier_cbs && 2258 !torture_must_stop()); 2259 smp_mb(); // Can't trust ordering if broken. 
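// At this point either every barrier callback was finally invoked or the test is being stopped; report the recovery only in the former case.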
2260 if (!torture_must_stop()) 2261 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2262 atomic_read(&barrier_cbs_invoked)); 2263 } else { 2264 n_barrier_successes++; 2265 } 2266 schedule_timeout_interruptible(HZ / 10); 2267 } while (!torture_must_stop()); 2268 torture_kthread_stopping("rcu_torture_barrier"); 2269 return 0; 2270 } 2271 2272 /* Initialize RCU barrier testing. */ 2273 static int rcu_torture_barrier_init(void) 2274 { 2275 int i; 2276 int ret; 2277 2278 if (n_barrier_cbs <= 0) 2279 return 0; 2280 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2281 pr_alert("%s" TORTURE_FLAG 2282 " Call or barrier ops missing for %s,\n", 2283 torture_type, cur_ops->name); 2284 pr_alert("%s" TORTURE_FLAG 2285 " RCU barrier testing omitted from run.\n", 2286 torture_type); 2287 return 0; 2288 } 2289 atomic_set(&barrier_cbs_count, 0); 2290 atomic_set(&barrier_cbs_invoked, 0); 2291 barrier_cbs_tasks = 2292 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2293 GFP_KERNEL); 2294 barrier_cbs_wq = 2295 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2296 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2297 return -ENOMEM; 2298 for (i = 0; i < n_barrier_cbs; i++) { 2299 init_waitqueue_head(&barrier_cbs_wq[i]); 2300 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2301 (void *)(long)i, 2302 barrier_cbs_tasks[i]); 2303 if (ret) 2304 return ret; 2305 } 2306 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2307 } 2308 2309 /* Clean up after RCU barrier testing. */ 2310 static void rcu_torture_barrier_cleanup(void) 2311 { 2312 int i; 2313 2314 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2315 if (barrier_cbs_tasks != NULL) { 2316 for (i = 0; i < n_barrier_cbs; i++) 2317 torture_stop_kthread(rcu_torture_barrier_cbs, 2318 barrier_cbs_tasks[i]); 2319 kfree(barrier_cbs_tasks); 2320 barrier_cbs_tasks = NULL; 2321 } 2322 if (barrier_cbs_wq != NULL) { 2323 kfree(barrier_cbs_wq); 2324 barrier_cbs_wq = NULL; 2325 } 2326 } 2327 2328 static bool rcu_torture_can_boost(void) 2329 { 2330 static int boost_warn_once; 2331 int prio; 2332 2333 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2334 return false; 2335 2336 prio = rcu_get_gp_kthreads_prio(); 2337 if (!prio) 2338 return false; 2339 2340 if (prio < 2) { 2341 if (boost_warn_once == 1) 2342 return false; 2343 2344 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2345 boost_warn_once = 1; 2346 return false; 2347 } 2348 2349 return true; 2350 } 2351 2352 static bool read_exit_child_stop; 2353 static bool read_exit_child_stopped; 2354 static wait_queue_head_t read_exit_wq; 2355 2356 // Child kthread which just does an rcutorture reader and exits. 2357 static int rcu_torture_read_exit_child(void *trsp_in) 2358 { 2359 struct torture_random_state *trsp = trsp_in; 2360 2361 set_user_nice(current, MAX_NICE); 2362 // Minimize time between reading and exiting. 2363 while (!kthread_should_stop()) 2364 schedule_timeout_uninterruptible(1); 2365 (void)rcu_torture_one_read(trsp); 2366 return 0; 2367 } 2368 2369 // Parent kthread which creates and destroys read-exit child kthreads. 2370 static int rcu_torture_read_exit(void *unused) 2371 { 2372 int count = 0; 2373 bool errexit = false; 2374 int i; 2375 struct task_struct *tsp; 2376 DEFINE_TORTURE_RANDOM(trs); 2377 2378 // Allocate and initialize. 
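// Overview: children are spawned in bursts of read_exit_burst tasks; after each burst, rcu_barrier() waits for the children's task_struct frees (avoiding OOM) and the parent sleeps for up to read_exit_delay seconds before starting the next burst.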
2379 set_user_nice(current, MAX_NICE); 2380 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 2381 2382 // Each pass through this loop does one read-exit episode. 2383 do { 2384 if (++count > read_exit_burst) { 2385 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 2386 rcu_barrier(); // Wait for task_struct free, avoid OOM. 2387 for (i = 0; i < read_exit_delay; i++) { 2388 schedule_timeout_uninterruptible(HZ); 2389 if (READ_ONCE(read_exit_child_stop)) 2390 break; 2391 } 2392 if (!READ_ONCE(read_exit_child_stop)) 2393 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 2394 count = 0; 2395 } 2396 if (READ_ONCE(read_exit_child_stop)) 2397 break; 2398 // Spawn child. 2399 tsp = kthread_run(rcu_torture_read_exit_child, 2400 &trs, "%s", 2401 "rcu_torture_read_exit_child"); 2402 if (IS_ERR(tsp)) { 2403 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2404 errexit = true; 2405 tsp = NULL; 2406 break; 2407 } 2408 cond_resched(); 2409 kthread_stop(tsp); 2410 n_read_exits++; 2411 stutter_wait("rcu_torture_read_exit"); 2412 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 2413 2414 // Clean up and exit. 2415 smp_store_release(&read_exit_child_stopped, true); // After reaping. 2416 smp_mb(); // Store before wakeup. 2417 wake_up(&read_exit_wq); 2418 while (!torture_must_stop()) 2419 schedule_timeout_uninterruptible(1); 2420 torture_kthread_stopping("rcu_torture_read_exit"); 2421 return 0; 2422 } 2423 2424 static int rcu_torture_read_exit_init(void) 2425 { 2426 if (read_exit_burst <= 0) 2427 return -EINVAL; 2428 init_waitqueue_head(&read_exit_wq); 2429 read_exit_child_stop = false; 2430 read_exit_child_stopped = false; 2431 return torture_create_kthread(rcu_torture_read_exit, NULL, 2432 read_exit_task); 2433 } 2434 2435 static void rcu_torture_read_exit_cleanup(void) 2436 { 2437 if (!read_exit_task) 2438 return; 2439 WRITE_ONCE(read_exit_child_stop, true); 2440 smp_mb(); // Above write before wait.
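// Wait for rcu_torture_read_exit() to acknowledge the stop request; this pairs with that function's smp_store_release() to read_exit_child_stopped and its subsequent wake_up().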
2441 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 2442 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 2443 } 2444 2445 static enum cpuhp_state rcutor_hp; 2446 2447 static void 2448 rcu_torture_cleanup(void) 2449 { 2450 int firsttime; 2451 int flags = 0; 2452 unsigned long gp_seq = 0; 2453 int i; 2454 2455 if (torture_cleanup_begin()) { 2456 if (cur_ops->cb_barrier != NULL) 2457 cur_ops->cb_barrier(); 2458 return; 2459 } 2460 if (!cur_ops) { 2461 torture_cleanup_end(); 2462 return; 2463 } 2464 2465 show_rcu_gp_kthreads(); 2466 rcu_torture_read_exit_cleanup(); 2467 rcu_torture_barrier_cleanup(); 2468 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2469 torture_stop_kthread(rcu_torture_stall, stall_task); 2470 torture_stop_kthread(rcu_torture_writer, writer_task); 2471 2472 if (reader_tasks) { 2473 for (i = 0; i < nrealreaders; i++) 2474 torture_stop_kthread(rcu_torture_reader, 2475 reader_tasks[i]); 2476 kfree(reader_tasks); 2477 } 2478 2479 if (fakewriter_tasks) { 2480 for (i = 0; i < nfakewriters; i++) { 2481 torture_stop_kthread(rcu_torture_fakewriter, 2482 fakewriter_tasks[i]); 2483 } 2484 kfree(fakewriter_tasks); 2485 fakewriter_tasks = NULL; 2486 } 2487 2488 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2489 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2490 pr_alert("%s: End-test grace-period state: g%lu f%#x\n", 2491 cur_ops->name, gp_seq, flags); 2492 torture_stop_kthread(rcu_torture_stats, stats_task); 2493 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2494 if (rcu_torture_can_boost()) 2495 cpuhp_remove_state(rcutor_hp); 2496 2497 /* 2498 * Wait for all RCU callbacks to fire, then do torture-type-specific 2499 * cleanup operations. 2500 */ 2501 if (cur_ops->cb_barrier != NULL) 2502 cur_ops->cb_barrier(); 2503 if (cur_ops->cleanup != NULL) 2504 cur_ops->cleanup(); 2505 2506 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2507 2508 if (err_segs_recorded) { 2509 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2510 if (rt_read_nsegs == 0) 2511 pr_alert("\t: No segments recorded!!!\n"); 2512 firsttime = 1; 2513 for (i = 0; i < rt_read_nsegs; i++) { 2514 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2515 if (err_segs[i].rt_delay_jiffies != 0) { 2516 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2517 err_segs[i].rt_delay_jiffies); 2518 firsttime = 0; 2519 } 2520 if (err_segs[i].rt_delay_ms != 0) { 2521 pr_cont("%s%ldms", firsttime ? "" : "+", 2522 err_segs[i].rt_delay_ms); 2523 firsttime = 0; 2524 } 2525 if (err_segs[i].rt_delay_us != 0) { 2526 pr_cont("%s%ldus", firsttime ? "" : "+", 2527 err_segs[i].rt_delay_us); 2528 firsttime = 0; 2529 } 2530 pr_cont("%s\n", 2531 err_segs[i].rt_preempted ? "preempted" : ""); 2532 2533 } 2534 } 2535 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2536 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2537 else if (torture_onoff_failures()) 2538 rcu_torture_print_module_parms(cur_ops, 2539 "End of test: RCU_HOTPLUG"); 2540 else 2541 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2542 torture_cleanup_end(); 2543 } 2544 2545 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2546 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2547 { 2548 } 2549 2550 static void rcu_torture_err_cb(struct rcu_head *rhp) 2551 { 2552 /* 2553 * This -might- happen due to race conditions, but is unlikely. 
2554 * The scenario that leads to this happening is that the 2555 * first of the pair of duplicate callbacks is queued, 2556 * someone else starts a grace period that includes that 2557 * callback, then the second of the pair must wait for the 2558 * next grace period. Unlikely, but can happen. If it 2559 * does happen, the debug-objects subsystem won't have splatted. 2560 */ 2561 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2562 } 2563 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2564 2565 /* 2566 * Verify that double-free causes debug-objects to complain, but only 2567 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2568 * cannot be carried out. 2569 */ 2570 static void rcu_test_debug_objects(void) 2571 { 2572 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2573 struct rcu_head rh1; 2574 struct rcu_head rh2; 2575 2576 init_rcu_head_on_stack(&rh1); 2577 init_rcu_head_on_stack(&rh2); 2578 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2579 2580 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2581 preempt_disable(); /* Prevent preemption from interrupting test. */ 2582 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2583 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2584 local_irq_disable(); /* Make it harder to start a new grace period. */ 2585 call_rcu(&rh2, rcu_torture_leak_cb); 2586 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2587 local_irq_enable(); 2588 rcu_read_unlock(); 2589 preempt_enable(); 2590 2591 /* Wait for them all to get done so we can safely return. */ 2592 rcu_barrier(); 2593 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2594 destroy_rcu_head_on_stack(&rh1); 2595 destroy_rcu_head_on_stack(&rh2); 2596 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2597 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2598 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2599 } 2600 2601 static void rcutorture_sync(void) 2602 { 2603 static unsigned long n; 2604 2605 if (cur_ops->sync && !(++n & 0xfff)) 2606 cur_ops->sync(); 2607 } 2608 2609 static int __init 2610 rcu_torture_init(void) 2611 { 2612 long i; 2613 int cpu; 2614 int firsterr = 0; 2615 static struct rcu_torture_ops *torture_ops[] = { 2616 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2617 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, 2618 &tasks_tracing_ops, &trivial_ops, 2619 }; 2620 2621 if (!torture_init_begin(torture_type, verbose)) 2622 return -EBUSY; 2623 2624 /* Process args and tell the world that the torturer is on the job. 
*/ 2625 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2626 cur_ops = torture_ops[i]; 2627 if (strcmp(torture_type, cur_ops->name) == 0) 2628 break; 2629 } 2630 if (i == ARRAY_SIZE(torture_ops)) { 2631 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2632 torture_type); 2633 pr_alert("rcu-torture types:"); 2634 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2635 pr_cont(" %s", torture_ops[i]->name); 2636 pr_cont("\n"); 2637 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2638 firsterr = -EINVAL; 2639 cur_ops = NULL; 2640 goto unwind; 2641 } 2642 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2643 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2644 fqs_duration = 0; 2645 } 2646 if (cur_ops->init) 2647 cur_ops->init(); 2648 2649 if (nreaders >= 0) { 2650 nrealreaders = nreaders; 2651 } else { 2652 nrealreaders = num_online_cpus() - 2 - nreaders; 2653 if (nrealreaders <= 0) 2654 nrealreaders = 1; 2655 } 2656 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2657 2658 /* Set up the freelist. */ 2659 2660 INIT_LIST_HEAD(&rcu_torture_freelist); 2661 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2662 rcu_tortures[i].rtort_mbtest = 0; 2663 list_add_tail(&rcu_tortures[i].rtort_free, 2664 &rcu_torture_freelist); 2665 } 2666 2667 /* Initialize the statistics so that each run gets its own numbers. */ 2668 2669 rcu_torture_current = NULL; 2670 rcu_torture_current_version = 0; 2671 atomic_set(&n_rcu_torture_alloc, 0); 2672 atomic_set(&n_rcu_torture_alloc_fail, 0); 2673 atomic_set(&n_rcu_torture_free, 0); 2674 atomic_set(&n_rcu_torture_mberror, 0); 2675 atomic_set(&n_rcu_torture_error, 0); 2676 n_rcu_torture_barrier_error = 0; 2677 n_rcu_torture_boost_ktrerror = 0; 2678 n_rcu_torture_boost_rterror = 0; 2679 n_rcu_torture_boost_failure = 0; 2680 n_rcu_torture_boosts = 0; 2681 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2682 atomic_set(&rcu_torture_wcount[i], 0); 2683 for_each_possible_cpu(cpu) { 2684 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2685 per_cpu(rcu_torture_count, cpu)[i] = 0; 2686 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2687 } 2688 } 2689 err_segs_recorded = 0; 2690 rt_read_nsegs = 0; 2691 2692 /* Start up the kthreads. 
*/ 2693 2694 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2695 writer_task); 2696 if (firsterr) 2697 goto unwind; 2698 if (nfakewriters > 0) { 2699 fakewriter_tasks = kcalloc(nfakewriters, 2700 sizeof(fakewriter_tasks[0]), 2701 GFP_KERNEL); 2702 if (fakewriter_tasks == NULL) { 2703 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2704 firsterr = -ENOMEM; 2705 goto unwind; 2706 } 2707 } 2708 for (i = 0; i < nfakewriters; i++) { 2709 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2710 NULL, fakewriter_tasks[i]); 2711 if (firsterr) 2712 goto unwind; 2713 } 2714 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2715 GFP_KERNEL); 2716 if (reader_tasks == NULL) { 2717 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2718 firsterr = -ENOMEM; 2719 goto unwind; 2720 } 2721 for (i = 0; i < nrealreaders; i++) { 2722 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2723 reader_tasks[i]); 2724 if (firsterr) 2725 goto unwind; 2726 } 2727 if (stat_interval > 0) { 2728 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2729 stats_task); 2730 if (firsterr) 2731 goto unwind; 2732 } 2733 if (test_no_idle_hz && shuffle_interval > 0) { 2734 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2735 if (firsterr) 2736 goto unwind; 2737 } 2738 if (stutter < 0) 2739 stutter = 0; 2740 if (stutter) { 2741 int t; 2742 2743 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2744 firsterr = torture_stutter_init(stutter * HZ, t); 2745 if (firsterr) 2746 goto unwind; 2747 } 2748 if (fqs_duration < 0) 2749 fqs_duration = 0; 2750 if (fqs_duration) { 2751 /* Create the fqs thread */ 2752 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2753 fqs_task); 2754 if (firsterr) 2755 goto unwind; 2756 } 2757 if (test_boost_interval < 1) 2758 test_boost_interval = 1; 2759 if (test_boost_duration < 2) 2760 test_boost_duration = 2; 2761 if (rcu_torture_can_boost()) { 2762 2763 boost_starttime = jiffies + test_boost_interval * HZ; 2764 2765 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2766 rcutorture_booster_init, 2767 rcutorture_booster_cleanup); 2768 if (firsterr < 0) 2769 goto unwind; 2770 rcutor_hp = firsterr; 2771 } 2772 shutdown_jiffies = jiffies + shutdown_secs * HZ; 2773 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2774 if (firsterr) 2775 goto unwind; 2776 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2777 rcutorture_sync); 2778 if (firsterr) 2779 goto unwind; 2780 firsterr = rcu_torture_stall_init(); 2781 if (firsterr) 2782 goto unwind; 2783 firsterr = rcu_torture_fwd_prog_init(); 2784 if (firsterr) 2785 goto unwind; 2786 firsterr = rcu_torture_barrier_init(); 2787 if (firsterr) 2788 goto unwind; 2789 firsterr = rcu_torture_read_exit_init(); 2790 if (firsterr) 2791 goto unwind; 2792 if (object_debug) 2793 rcu_test_debug_objects(); 2794 torture_init_end(); 2795 return 0; 2796 2797 unwind: 2798 torture_init_end(); 2799 rcu_torture_cleanup(); 2800 return firsterr; 2801 } 2802 2803 module_init(rcu_torture_init); 2804 module_exit(rcu_torture_cleanup); 2805
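/*
 * Illustrative usage, not part of the original file: when rcutorture is
 * built as a module, a test run is typically started with something like
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 * and stopped with "rmmod rcutorture", at which point rcu_torture_cleanup()
 * prints the "End of test" SUCCESS/FAILURE summary.
 */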