1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.txt 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 #include <linux/rcupdate_trace.h> 49 50 #include "rcu.h" 51 52 MODULE_LICENSE("GPL"); 53 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 54 55 #ifndef data_race 56 #define data_race(expr) \ 57 ({ \ 58 expr; \ 59 }) 60 #endif 61 #ifndef ASSERT_EXCLUSIVE_WRITER 62 #define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0) 63 #endif 64 #ifndef ASSERT_EXCLUSIVE_ACCESS 65 #define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0) 66 #endif 67 68 /* Bits for ->extendables field, extendables param, and related definitions. */ 69 #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ 70 #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) 71 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 72 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 73 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 74 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 75 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 76 #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ 77 #define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ 78 #define RCUTORTURE_MAX_EXTEND \ 79 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 80 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 81 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 82 /* Must be power of two minus one. 
*/ 83 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 84 85 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 86 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 87 torture_param(int, fqs_duration, 0, 88 "Duration of fqs bursts (us), 0 to disable"); 89 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 90 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 91 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); 92 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 93 torture_param(int, fwd_progress_holdoff, 60, 94 "Time between forward-progress tests (s)"); 95 torture_param(bool, fwd_progress_need_resched, 1, 96 "Hide cond_resched() behind need_resched()"); 97 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 98 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 99 torture_param(bool, gp_normal, false, 100 "Use normal (non-expedited) GP wait primitives"); 101 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 102 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 103 torture_param(int, n_barrier_cbs, 0, 104 "# of callbacks/kthreads for barrier testing"); 105 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 106 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 107 torture_param(int, object_debug, 0, 108 "Enable debug-object double call_rcu() testing"); 109 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 110 torture_param(int, onoff_interval, 0, 111 "Time between CPU hotplugs (jiffies), 0=disable"); 112 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 113 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 114 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 115 torture_param(int, stall_cpu_holdoff, 10, 116 "Time to wait before starting stall (s)."); 117 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 118 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 119 torture_param(int, stall_gp_kthread, 0, 120 "Grace-period kthread stall duration (s)."); 121 torture_param(int, stat_interval, 60, 122 "Number of seconds between stats printk()s"); 123 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 124 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 125 torture_param(int, test_boost_duration, 4, 126 "Duration of each boost test, seconds."); 127 torture_param(int, test_boost_interval, 7, 128 "Interval between boost tests, seconds."); 129 torture_param(bool, test_no_idle_hz, true, 130 "Test support for tickless idle CPUs"); 131 torture_param(int, verbose, 1, 132 "Enable verbose debugging printk()s"); 133 134 static char *torture_type = "rcu"; 135 module_param(torture_type, charp, 0444); 136 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 137 138 static int nrealreaders; 139 static struct task_struct *writer_task; 140 static struct task_struct **fakewriter_tasks; 141 static struct task_struct **reader_tasks; 142 static struct task_struct *stats_task; 143 static struct task_struct *fqs_task; 144 static struct task_struct *boost_tasks[NR_CPUS]; 145 static struct task_struct *stall_task; 146 static struct task_struct *fwd_prog_task; 147 static struct task_struct **barrier_cbs_tasks; 148 static struct 
task_struct *barrier_task; 149 150 #define RCU_TORTURE_PIPE_LEN 10 151 152 struct rcu_torture { 153 struct rcu_head rtort_rcu; 154 int rtort_pipe_count; 155 struct list_head rtort_free; 156 int rtort_mbtest; 157 }; 158 159 static LIST_HEAD(rcu_torture_freelist); 160 static struct rcu_torture __rcu *rcu_torture_current; 161 static unsigned long rcu_torture_current_version; 162 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 163 static DEFINE_SPINLOCK(rcu_torture_lock); 164 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 165 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 166 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 167 static atomic_t n_rcu_torture_alloc; 168 static atomic_t n_rcu_torture_alloc_fail; 169 static atomic_t n_rcu_torture_free; 170 static atomic_t n_rcu_torture_mberror; 171 static atomic_t n_rcu_torture_error; 172 static long n_rcu_torture_barrier_error; 173 static long n_rcu_torture_boost_ktrerror; 174 static long n_rcu_torture_boost_rterror; 175 static long n_rcu_torture_boost_failure; 176 static long n_rcu_torture_boosts; 177 static atomic_long_t n_rcu_torture_timers; 178 static long n_barrier_attempts; 179 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 180 static struct list_head rcu_torture_removed; 181 static unsigned long shutdown_jiffies; 182 183 static int rcu_torture_writer_state; 184 #define RTWS_FIXED_DELAY 0 185 #define RTWS_DELAY 1 186 #define RTWS_REPLACE 2 187 #define RTWS_DEF_FREE 3 188 #define RTWS_EXP_SYNC 4 189 #define RTWS_COND_GET 5 190 #define RTWS_COND_SYNC 6 191 #define RTWS_SYNC 7 192 #define RTWS_STUTTER 8 193 #define RTWS_STOPPING 9 194 static const char * const rcu_torture_writer_state_names[] = { 195 "RTWS_FIXED_DELAY", 196 "RTWS_DELAY", 197 "RTWS_REPLACE", 198 "RTWS_DEF_FREE", 199 "RTWS_EXP_SYNC", 200 "RTWS_COND_GET", 201 "RTWS_COND_SYNC", 202 "RTWS_SYNC", 203 "RTWS_STUTTER", 204 "RTWS_STOPPING", 205 }; 206 207 /* Record reader segment types and duration for first failing read. */ 208 struct rt_read_seg { 209 int rt_readstate; 210 unsigned long rt_delay_jiffies; 211 unsigned long rt_delay_ms; 212 unsigned long rt_delay_us; 213 bool rt_preempted; 214 }; 215 static int err_segs_recorded; 216 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 217 static int rt_read_nsegs; 218 219 static const char *rcu_torture_writer_state_getname(void) 220 { 221 unsigned int i = READ_ONCE(rcu_torture_writer_state); 222 223 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 224 return "???"; 225 return rcu_torture_writer_state_names[i]; 226 } 227 228 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) 229 #define rcu_can_boost() 1 230 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 231 #define rcu_can_boost() 0 232 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 233 234 #ifdef CONFIG_RCU_TRACE 235 static u64 notrace rcu_trace_clock_local(void) 236 { 237 u64 ts = trace_clock_local(); 238 239 (void)do_div(ts, NSEC_PER_USEC); 240 return ts; 241 } 242 #else /* #ifdef CONFIG_RCU_TRACE */ 243 static u64 notrace rcu_trace_clock_local(void) 244 { 245 return 0ULL; 246 } 247 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 248 249 /* 250 * Stop aggressive CPU-hog tests a bit before the end of the test in order 251 * to avoid interfering with test shutdown. 
252 */ 253 static bool shutdown_time_arrived(void) 254 { 255 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 256 } 257 258 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 259 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 260 /* and boost task create/destroy. */ 261 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 262 static bool barrier_phase; /* Test phase. */ 263 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 264 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 265 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 266 267 static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 268 269 /* 270 * Allocate an element from the rcu_tortures pool. 271 */ 272 static struct rcu_torture * 273 rcu_torture_alloc(void) 274 { 275 struct list_head *p; 276 277 spin_lock_bh(&rcu_torture_lock); 278 if (list_empty(&rcu_torture_freelist)) { 279 atomic_inc(&n_rcu_torture_alloc_fail); 280 spin_unlock_bh(&rcu_torture_lock); 281 return NULL; 282 } 283 atomic_inc(&n_rcu_torture_alloc); 284 p = rcu_torture_freelist.next; 285 list_del_init(p); 286 spin_unlock_bh(&rcu_torture_lock); 287 return container_of(p, struct rcu_torture, rtort_free); 288 } 289 290 /* 291 * Free an element to the rcu_tortures pool. 292 */ 293 static void 294 rcu_torture_free(struct rcu_torture *p) 295 { 296 atomic_inc(&n_rcu_torture_free); 297 spin_lock_bh(&rcu_torture_lock); 298 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 299 spin_unlock_bh(&rcu_torture_lock); 300 } 301 302 /* 303 * Operations vector for selecting different types of tests. 304 */ 305 306 struct rcu_torture_ops { 307 int ttype; 308 void (*init)(void); 309 void (*cleanup)(void); 310 int (*readlock)(void); 311 void (*read_delay)(struct torture_random_state *rrsp, 312 struct rt_read_seg *rtrsp); 313 void (*readunlock)(int idx); 314 unsigned long (*get_gp_seq)(void); 315 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 316 void (*deferred_free)(struct rcu_torture *p); 317 void (*sync)(void); 318 void (*exp_sync)(void); 319 unsigned long (*get_state)(void); 320 void (*cond_sync)(unsigned long oldstate); 321 call_rcu_func_t call; 322 void (*cb_barrier)(void); 323 void (*fqs)(void); 324 void (*stats)(void); 325 int (*stall_dur)(void); 326 int irq_capable; 327 int can_boost; 328 int extendables; 329 int slow_gps; 330 const char *name; 331 }; 332 333 static struct rcu_torture_ops *cur_ops; 334 335 /* 336 * Definitions for rcu torture testing. 337 */ 338 339 static int rcu_torture_read_lock(void) __acquires(RCU) 340 { 341 rcu_read_lock(); 342 return 0; 343 } 344 345 static void 346 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 347 { 348 unsigned long started; 349 unsigned long completed; 350 const unsigned long shortdelay_us = 200; 351 unsigned long longdelay_ms = 300; 352 unsigned long long ts; 353 354 /* We want a short delay sometimes to make a reader delay the grace 355 * period, and we want a long delay occasionally to trigger 356 * force_quiescent_state. */ 357 358 if (!READ_ONCE(rcu_fwd_cb_nodelay) && 359 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 360 started = cur_ops->get_gp_seq(); 361 ts = rcu_trace_clock_local(); 362 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 363 longdelay_ms = 5; /* Avoid triggering BH limits. 
*/ 364 mdelay(longdelay_ms); 365 rtrsp->rt_delay_ms = longdelay_ms; 366 completed = cur_ops->get_gp_seq(); 367 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 368 started, completed); 369 } 370 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 371 udelay(shortdelay_us); 372 rtrsp->rt_delay_us = shortdelay_us; 373 } 374 if (!preempt_count() && 375 !(torture_random(rrsp) % (nrealreaders * 500))) { 376 torture_preempt_schedule(); /* QS only if preemptible. */ 377 rtrsp->rt_preempted = true; 378 } 379 } 380 381 static void rcu_torture_read_unlock(int idx) __releases(RCU) 382 { 383 rcu_read_unlock(); 384 } 385 386 /* 387 * Update callback in the pipe. This should be invoked after a grace period. 388 */ 389 static bool 390 rcu_torture_pipe_update_one(struct rcu_torture *rp) 391 { 392 int i; 393 394 i = READ_ONCE(rp->rtort_pipe_count); 395 if (i > RCU_TORTURE_PIPE_LEN) 396 i = RCU_TORTURE_PIPE_LEN; 397 atomic_inc(&rcu_torture_wcount[i]); 398 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 399 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 400 rp->rtort_mbtest = 0; 401 return true; 402 } 403 return false; 404 } 405 406 /* 407 * Update all callbacks in the pipe. Suitable for synchronous grace-period 408 * primitives. 409 */ 410 static void 411 rcu_torture_pipe_update(struct rcu_torture *old_rp) 412 { 413 struct rcu_torture *rp; 414 struct rcu_torture *rp1; 415 416 if (old_rp) 417 list_add(&old_rp->rtort_free, &rcu_torture_removed); 418 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 419 if (rcu_torture_pipe_update_one(rp)) { 420 list_del(&rp->rtort_free); 421 rcu_torture_free(rp); 422 } 423 } 424 } 425 426 static void 427 rcu_torture_cb(struct rcu_head *p) 428 { 429 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 430 431 if (torture_must_stop_irq()) { 432 /* Test is ending, just drop callbacks on the floor. */ 433 /* The next initialization will pick up the pieces. */ 434 return; 435 } 436 if (rcu_torture_pipe_update_one(rp)) 437 rcu_torture_free(rp); 438 else 439 cur_ops->deferred_free(rp); 440 } 441 442 static unsigned long rcu_no_completed(void) 443 { 444 return 0; 445 } 446 447 static void rcu_torture_deferred_free(struct rcu_torture *p) 448 { 449 call_rcu(&p->rtort_rcu, rcu_torture_cb); 450 } 451 452 static void rcu_sync_torture_init(void) 453 { 454 INIT_LIST_HEAD(&rcu_torture_removed); 455 } 456 457 static struct rcu_torture_ops rcu_ops = { 458 .ttype = RCU_FLAVOR, 459 .init = rcu_sync_torture_init, 460 .readlock = rcu_torture_read_lock, 461 .read_delay = rcu_read_delay, 462 .readunlock = rcu_torture_read_unlock, 463 .get_gp_seq = rcu_get_gp_seq, 464 .gp_diff = rcu_seq_diff, 465 .deferred_free = rcu_torture_deferred_free, 466 .sync = synchronize_rcu, 467 .exp_sync = synchronize_rcu_expedited, 468 .get_state = get_state_synchronize_rcu, 469 .cond_sync = cond_synchronize_rcu, 470 .call = call_rcu, 471 .cb_barrier = rcu_barrier, 472 .fqs = rcu_force_quiescent_state, 473 .stats = NULL, 474 .stall_dur = rcu_jiffies_till_stall_check, 475 .irq_capable = 1, 476 .can_boost = rcu_can_boost(), 477 .extendables = RCUTORTURE_MAX_EXTEND, 478 .name = "rcu" 479 }; 480 481 /* 482 * Don't even think about trying any of these in real life!!! 483 * The names includes "busted", and they really means it! 484 * The only purpose of these functions is to provide a buggy RCU 485 * implementation to make sure that rcutorture correctly emits 486 * buggy-RCU error messages. 
487 */ 488 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 489 { 490 /* This is a deliberate bug for testing purposes only! */ 491 rcu_torture_cb(&p->rtort_rcu); 492 } 493 494 static void synchronize_rcu_busted(void) 495 { 496 /* This is a deliberate bug for testing purposes only! */ 497 } 498 499 static void 500 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 501 { 502 /* This is a deliberate bug for testing purposes only! */ 503 func(head); 504 } 505 506 static struct rcu_torture_ops rcu_busted_ops = { 507 .ttype = INVALID_RCU_FLAVOR, 508 .init = rcu_sync_torture_init, 509 .readlock = rcu_torture_read_lock, 510 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 511 .readunlock = rcu_torture_read_unlock, 512 .get_gp_seq = rcu_no_completed, 513 .deferred_free = rcu_busted_torture_deferred_free, 514 .sync = synchronize_rcu_busted, 515 .exp_sync = synchronize_rcu_busted, 516 .call = call_rcu_busted, 517 .cb_barrier = NULL, 518 .fqs = NULL, 519 .stats = NULL, 520 .irq_capable = 1, 521 .name = "busted" 522 }; 523 524 /* 525 * Definitions for srcu torture testing. 526 */ 527 528 DEFINE_STATIC_SRCU(srcu_ctl); 529 static struct srcu_struct srcu_ctld; 530 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 531 532 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 533 { 534 return srcu_read_lock(srcu_ctlp); 535 } 536 537 static void 538 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 539 { 540 long delay; 541 const long uspertick = 1000000 / HZ; 542 const long longdelay = 10; 543 544 /* We want there to be long-running readers, but not all the time. */ 545 546 delay = torture_random(rrsp) % 547 (nrealreaders * 2 * longdelay * uspertick); 548 if (!delay && in_task()) { 549 schedule_timeout_interruptible(longdelay); 550 rtrsp->rt_delay_jiffies = longdelay; 551 } else { 552 rcu_read_delay(rrsp, rtrsp); 553 } 554 } 555 556 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 557 { 558 srcu_read_unlock(srcu_ctlp, idx); 559 } 560 561 static unsigned long srcu_torture_completed(void) 562 { 563 return srcu_batches_completed(srcu_ctlp); 564 } 565 566 static void srcu_torture_deferred_free(struct rcu_torture *rp) 567 { 568 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 569 } 570 571 static void srcu_torture_synchronize(void) 572 { 573 synchronize_srcu(srcu_ctlp); 574 } 575 576 static void srcu_torture_call(struct rcu_head *head, 577 rcu_callback_t func) 578 { 579 call_srcu(srcu_ctlp, head, func); 580 } 581 582 static void srcu_torture_barrier(void) 583 { 584 srcu_barrier(srcu_ctlp); 585 } 586 587 static void srcu_torture_stats(void) 588 { 589 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 590 } 591 592 static void srcu_torture_synchronize_expedited(void) 593 { 594 synchronize_srcu_expedited(srcu_ctlp); 595 } 596 597 static struct rcu_torture_ops srcu_ops = { 598 .ttype = SRCU_FLAVOR, 599 .init = rcu_sync_torture_init, 600 .readlock = srcu_torture_read_lock, 601 .read_delay = srcu_read_delay, 602 .readunlock = srcu_torture_read_unlock, 603 .get_gp_seq = srcu_torture_completed, 604 .deferred_free = srcu_torture_deferred_free, 605 .sync = srcu_torture_synchronize, 606 .exp_sync = srcu_torture_synchronize_expedited, 607 .call = srcu_torture_call, 608 .cb_barrier = srcu_torture_barrier, 609 .stats = srcu_torture_stats, 610 .irq_capable = 1, 611 .name = "srcu" 612 }; 613 614 static void srcu_torture_init(void) 615 { 616 rcu_sync_torture_init(); 617 WARN_ON(init_srcu_struct(&srcu_ctld)); 618 
srcu_ctlp = &srcu_ctld; 619 } 620 621 static void srcu_torture_cleanup(void) 622 { 623 cleanup_srcu_struct(&srcu_ctld); 624 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 625 } 626 627 /* As above, but dynamically allocated. */ 628 static struct rcu_torture_ops srcud_ops = { 629 .ttype = SRCU_FLAVOR, 630 .init = srcu_torture_init, 631 .cleanup = srcu_torture_cleanup, 632 .readlock = srcu_torture_read_lock, 633 .read_delay = srcu_read_delay, 634 .readunlock = srcu_torture_read_unlock, 635 .get_gp_seq = srcu_torture_completed, 636 .deferred_free = srcu_torture_deferred_free, 637 .sync = srcu_torture_synchronize, 638 .exp_sync = srcu_torture_synchronize_expedited, 639 .call = srcu_torture_call, 640 .cb_barrier = srcu_torture_barrier, 641 .stats = srcu_torture_stats, 642 .irq_capable = 1, 643 .name = "srcud" 644 }; 645 646 /* As above, but broken due to inappropriate reader extension. */ 647 static struct rcu_torture_ops busted_srcud_ops = { 648 .ttype = SRCU_FLAVOR, 649 .init = srcu_torture_init, 650 .cleanup = srcu_torture_cleanup, 651 .readlock = srcu_torture_read_lock, 652 .read_delay = rcu_read_delay, 653 .readunlock = srcu_torture_read_unlock, 654 .get_gp_seq = srcu_torture_completed, 655 .deferred_free = srcu_torture_deferred_free, 656 .sync = srcu_torture_synchronize, 657 .exp_sync = srcu_torture_synchronize_expedited, 658 .call = srcu_torture_call, 659 .cb_barrier = srcu_torture_barrier, 660 .stats = srcu_torture_stats, 661 .irq_capable = 1, 662 .extendables = RCUTORTURE_MAX_EXTEND, 663 .name = "busted_srcud" 664 }; 665 666 /* 667 * Definitions for RCU-tasks torture testing. 668 */ 669 670 static int tasks_torture_read_lock(void) 671 { 672 return 0; 673 } 674 675 static void tasks_torture_read_unlock(int idx) 676 { 677 } 678 679 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 680 { 681 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 682 } 683 684 static void synchronize_rcu_mult_test(void) 685 { 686 synchronize_rcu_mult(call_rcu_tasks, call_rcu); 687 } 688 689 static struct rcu_torture_ops tasks_ops = { 690 .ttype = RCU_TASKS_FLAVOR, 691 .init = rcu_sync_torture_init, 692 .readlock = tasks_torture_read_lock, 693 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 694 .readunlock = tasks_torture_read_unlock, 695 .get_gp_seq = rcu_no_completed, 696 .deferred_free = rcu_tasks_torture_deferred_free, 697 .sync = synchronize_rcu_tasks, 698 .exp_sync = synchronize_rcu_mult_test, 699 .call = call_rcu_tasks, 700 .cb_barrier = rcu_barrier_tasks, 701 .fqs = NULL, 702 .stats = NULL, 703 .irq_capable = 1, 704 .slow_gps = 1, 705 .name = "tasks" 706 }; 707 708 /* 709 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 710 * This implementation does not necessarily work well with CPU hotplug. 711 */ 712 713 static void synchronize_rcu_trivial(void) 714 { 715 int cpu; 716 717 for_each_online_cpu(cpu) { 718 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 719 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 720 } 721 } 722 723 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 724 { 725 preempt_disable(); 726 return 0; 727 } 728 729 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 730 { 731 preempt_enable(); 732 } 733 734 static struct rcu_torture_ops trivial_ops = { 735 .ttype = RCU_TRIVIAL_FLAVOR, 736 .init = rcu_sync_torture_init, 737 .readlock = rcu_torture_read_lock_trivial, 738 .read_delay = rcu_read_delay, /* just reuse rcu's version. 
*/ 739 .readunlock = rcu_torture_read_unlock_trivial, 740 .get_gp_seq = rcu_no_completed, 741 .sync = synchronize_rcu_trivial, 742 .exp_sync = synchronize_rcu_trivial, 743 .fqs = NULL, 744 .stats = NULL, 745 .irq_capable = 1, 746 .name = "trivial" 747 }; 748 749 /* 750 * Definitions for rude RCU-tasks torture testing. 751 */ 752 753 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 754 { 755 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 756 } 757 758 static struct rcu_torture_ops tasks_rude_ops = { 759 .ttype = RCU_TASKS_RUDE_FLAVOR, 760 .init = rcu_sync_torture_init, 761 .readlock = rcu_torture_read_lock_trivial, 762 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 763 .readunlock = rcu_torture_read_unlock_trivial, 764 .get_gp_seq = rcu_no_completed, 765 .deferred_free = rcu_tasks_rude_torture_deferred_free, 766 .sync = synchronize_rcu_tasks_rude, 767 .exp_sync = synchronize_rcu_tasks_rude, 768 .call = call_rcu_tasks_rude, 769 .cb_barrier = rcu_barrier_tasks_rude, 770 .fqs = NULL, 771 .stats = NULL, 772 .irq_capable = 1, 773 .name = "tasks-rude" 774 }; 775 776 /* 777 * Definitions for tracing RCU-tasks torture testing. 778 */ 779 780 static int tasks_tracing_torture_read_lock(void) 781 { 782 rcu_read_lock_trace(); 783 return 0; 784 } 785 786 static void tasks_tracing_torture_read_unlock(int idx) 787 { 788 rcu_read_unlock_trace(); 789 } 790 791 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 792 { 793 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 794 } 795 796 static struct rcu_torture_ops tasks_tracing_ops = { 797 .ttype = RCU_TASKS_TRACING_FLAVOR, 798 .init = rcu_sync_torture_init, 799 .readlock = tasks_tracing_torture_read_lock, 800 .read_delay = srcu_read_delay, /* just reuse srcu's version. */ 801 .readunlock = tasks_tracing_torture_read_unlock, 802 .get_gp_seq = rcu_no_completed, 803 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 804 .sync = synchronize_rcu_tasks_trace, 805 .exp_sync = synchronize_rcu_tasks_trace, 806 .call = call_rcu_tasks_trace, 807 .cb_barrier = rcu_barrier_tasks_trace, 808 .fqs = NULL, 809 .stats = NULL, 810 .irq_capable = 1, 811 .slow_gps = 1, 812 .name = "tasks-tracing" 813 }; 814 815 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 816 { 817 if (!cur_ops->gp_diff) 818 return new - old; 819 return cur_ops->gp_diff(new, old); 820 } 821 822 static bool __maybe_unused torturing_tasks(void) 823 { 824 return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops; 825 } 826 827 /* 828 * RCU torture priority-boost testing. Runs one real-time thread per 829 * CPU for moderate bursts, repeatedly registering RCU callbacks and 830 * spinning waiting for them to be invoked. If a given callback takes 831 * too long to be invoked, we assume that priority inversion has occurred. 832 */ 833 834 struct rcu_boost_inflight { 835 struct rcu_head rcu; 836 int inflight; 837 }; 838 839 static void rcu_torture_boost_cb(struct rcu_head *head) 840 { 841 struct rcu_boost_inflight *rbip = 842 container_of(head, struct rcu_boost_inflight, rcu); 843 844 /* Ensure RCU-core accesses precede clearing ->inflight */ 845 smp_store_release(&rbip->inflight, 0); 846 } 847 848 static int old_rt_runtime = -1; 849 850 static void rcu_torture_disable_rt_throttle(void) 851 { 852 /* 853 * Disable RT throttling so that rcutorture's boost threads don't get 854 * throttled. 
Only possible if rcutorture is built-in otherwise the 855 * user should manually do this by setting the sched_rt_period_us and 856 * sched_rt_runtime sysctls. 857 */ 858 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 859 return; 860 861 old_rt_runtime = sysctl_sched_rt_runtime; 862 sysctl_sched_rt_runtime = -1; 863 } 864 865 static void rcu_torture_enable_rt_throttle(void) 866 { 867 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 868 return; 869 870 sysctl_sched_rt_runtime = old_rt_runtime; 871 old_rt_runtime = -1; 872 } 873 874 static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) 875 { 876 if (end - start > test_boost_duration * HZ - HZ / 2) { 877 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 878 n_rcu_torture_boost_failure++; 879 880 return true; /* failed */ 881 } 882 883 return false; /* passed */ 884 } 885 886 static int rcu_torture_boost(void *arg) 887 { 888 unsigned long call_rcu_time; 889 unsigned long endtime; 890 unsigned long oldstarttime; 891 struct rcu_boost_inflight rbi = { .inflight = 0 }; 892 struct sched_param sp; 893 894 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 895 896 /* Set real-time priority. */ 897 sp.sched_priority = 1; 898 if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { 899 VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); 900 n_rcu_torture_boost_rterror++; 901 } 902 903 init_rcu_head_on_stack(&rbi.rcu); 904 /* Each pass through the following loop does one boost-test cycle. */ 905 do { 906 /* Track if the test failed already in this test interval? */ 907 bool failed = false; 908 909 /* Increment n_rcu_torture_boosts once per boost-test */ 910 while (!kthread_should_stop()) { 911 if (mutex_trylock(&boost_mutex)) { 912 n_rcu_torture_boosts++; 913 mutex_unlock(&boost_mutex); 914 break; 915 } 916 schedule_timeout_uninterruptible(1); 917 } 918 if (kthread_should_stop()) 919 goto checkwait; 920 921 /* Wait for the next test interval. */ 922 oldstarttime = boost_starttime; 923 while (time_before(jiffies, oldstarttime)) { 924 schedule_timeout_interruptible(oldstarttime - jiffies); 925 stutter_wait("rcu_torture_boost"); 926 if (torture_must_stop()) 927 goto checkwait; 928 } 929 930 /* Do one boost-test interval. */ 931 endtime = oldstarttime + test_boost_duration * HZ; 932 call_rcu_time = jiffies; 933 while (time_before(jiffies, endtime)) { 934 /* If we don't have a callback in flight, post one. */ 935 if (!smp_load_acquire(&rbi.inflight)) { 936 /* RCU core before ->inflight = 1. */ 937 smp_store_release(&rbi.inflight, 1); 938 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 939 /* Check if the boost test failed */ 940 failed = failed || 941 rcu_torture_boost_failed(call_rcu_time, 942 jiffies); 943 call_rcu_time = jiffies; 944 } 945 stutter_wait("rcu_torture_boost"); 946 if (torture_must_stop()) 947 goto checkwait; 948 } 949 950 /* 951 * If boost never happened, then inflight will always be 1, in 952 * this case the boost check would never happen in the above 953 * loop so do another one here. 954 */ 955 if (!failed && smp_load_acquire(&rbi.inflight)) 956 rcu_torture_boost_failed(call_rcu_time, jiffies); 957 958 /* 959 * Set the start time of the next test interval. 960 * Yes, this is vulnerable to long delays, but such 961 * delays simply cause a false negative for the next 962 * interval. Besides, we are running at RT priority, 963 * so delays should be relatively rare. 
964 */ 965 while (oldstarttime == boost_starttime && 966 !kthread_should_stop()) { 967 if (mutex_trylock(&boost_mutex)) { 968 boost_starttime = jiffies + 969 test_boost_interval * HZ; 970 mutex_unlock(&boost_mutex); 971 break; 972 } 973 schedule_timeout_uninterruptible(1); 974 } 975 976 /* Go do the stutter. */ 977 checkwait: stutter_wait("rcu_torture_boost"); 978 } while (!torture_must_stop()); 979 980 /* Clean up and exit. */ 981 while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { 982 torture_shutdown_absorb("rcu_torture_boost"); 983 schedule_timeout_uninterruptible(1); 984 } 985 destroy_rcu_head_on_stack(&rbi.rcu); 986 torture_kthread_stopping("rcu_torture_boost"); 987 return 0; 988 } 989 990 /* 991 * RCU torture force-quiescent-state kthread. Repeatedly induces 992 * bursts of calls to force_quiescent_state(), increasing the probability 993 * of occurrence of some important types of race conditions. 994 */ 995 static int 996 rcu_torture_fqs(void *arg) 997 { 998 unsigned long fqs_resume_time; 999 int fqs_burst_remaining; 1000 1001 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1002 do { 1003 fqs_resume_time = jiffies + fqs_stutter * HZ; 1004 while (time_before(jiffies, fqs_resume_time) && 1005 !kthread_should_stop()) { 1006 schedule_timeout_interruptible(1); 1007 } 1008 fqs_burst_remaining = fqs_duration; 1009 while (fqs_burst_remaining > 0 && 1010 !kthread_should_stop()) { 1011 cur_ops->fqs(); 1012 udelay(fqs_holdoff); 1013 fqs_burst_remaining -= fqs_holdoff; 1014 } 1015 stutter_wait("rcu_torture_fqs"); 1016 } while (!torture_must_stop()); 1017 torture_kthread_stopping("rcu_torture_fqs"); 1018 return 0; 1019 } 1020 1021 /* 1022 * RCU torture writer kthread. Repeatedly substitutes a new structure 1023 * for that pointed to by rcu_torture_current, freeing the old structure 1024 * after a series of grace periods (the "pipeline"). 1025 */ 1026 static int 1027 rcu_torture_writer(void *arg) 1028 { 1029 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1030 int expediting = 0; 1031 unsigned long gp_snap; 1032 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; 1033 bool gp_sync1 = gp_sync; 1034 int i; 1035 struct rcu_torture *rp; 1036 struct rcu_torture *old_rp; 1037 static DEFINE_TORTURE_RANDOM(rand); 1038 int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, 1039 RTWS_COND_GET, RTWS_SYNC }; 1040 int nsynctypes = 0; 1041 1042 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1043 if (!can_expedite) 1044 pr_alert("%s" TORTURE_FLAG 1045 " GP expediting controlled from boot/sysfs for %s.\n", 1046 torture_type, cur_ops->name); 1047 1048 /* Initialize synctype[] array. If none set, take default. 
*/ 1049 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1) 1050 gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true; 1051 if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) { 1052 synctype[nsynctypes++] = RTWS_COND_GET; 1053 pr_info("%s: Testing conditional GPs.\n", __func__); 1054 } else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) { 1055 pr_alert("%s: gp_cond without primitives.\n", __func__); 1056 } 1057 if (gp_exp1 && cur_ops->exp_sync) { 1058 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1059 pr_info("%s: Testing expedited GPs.\n", __func__); 1060 } else if (gp_exp && !cur_ops->exp_sync) { 1061 pr_alert("%s: gp_exp without primitives.\n", __func__); 1062 } 1063 if (gp_normal1 && cur_ops->deferred_free) { 1064 synctype[nsynctypes++] = RTWS_DEF_FREE; 1065 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1066 } else if (gp_normal && !cur_ops->deferred_free) { 1067 pr_alert("%s: gp_normal without primitives.\n", __func__); 1068 } 1069 if (gp_sync1 && cur_ops->sync) { 1070 synctype[nsynctypes++] = RTWS_SYNC; 1071 pr_info("%s: Testing normal GPs.\n", __func__); 1072 } else if (gp_sync && !cur_ops->sync) { 1073 pr_alert("%s: gp_sync without primitives.\n", __func__); 1074 } 1075 if (WARN_ONCE(nsynctypes == 0, 1076 "rcu_torture_writer: No update-side primitives.\n")) { 1077 /* 1078 * No updates primitives, so don't try updating. 1079 * The resulting test won't be testing much, hence the 1080 * above WARN_ONCE(). 1081 */ 1082 rcu_torture_writer_state = RTWS_STOPPING; 1083 torture_kthread_stopping("rcu_torture_writer"); 1084 } 1085 1086 do { 1087 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1088 schedule_timeout_uninterruptible(1); 1089 rp = rcu_torture_alloc(); 1090 if (rp == NULL) 1091 continue; 1092 rp->rtort_pipe_count = 0; 1093 rcu_torture_writer_state = RTWS_DELAY; 1094 udelay(torture_random(&rand) & 0x3ff); 1095 rcu_torture_writer_state = RTWS_REPLACE; 1096 old_rp = rcu_dereference_check(rcu_torture_current, 1097 current == writer_task); 1098 rp->rtort_mbtest = 1; 1099 rcu_assign_pointer(rcu_torture_current, rp); 1100 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1101 if (old_rp) { 1102 i = old_rp->rtort_pipe_count; 1103 if (i > RCU_TORTURE_PIPE_LEN) 1104 i = RCU_TORTURE_PIPE_LEN; 1105 atomic_inc(&rcu_torture_wcount[i]); 1106 WRITE_ONCE(old_rp->rtort_pipe_count, 1107 old_rp->rtort_pipe_count + 1); 1108 switch (synctype[torture_random(&rand) % nsynctypes]) { 1109 case RTWS_DEF_FREE: 1110 rcu_torture_writer_state = RTWS_DEF_FREE; 1111 cur_ops->deferred_free(old_rp); 1112 break; 1113 case RTWS_EXP_SYNC: 1114 rcu_torture_writer_state = RTWS_EXP_SYNC; 1115 cur_ops->exp_sync(); 1116 rcu_torture_pipe_update(old_rp); 1117 break; 1118 case RTWS_COND_GET: 1119 rcu_torture_writer_state = RTWS_COND_GET; 1120 gp_snap = cur_ops->get_state(); 1121 i = torture_random(&rand) % 16; 1122 if (i != 0) 1123 schedule_timeout_interruptible(i); 1124 udelay(torture_random(&rand) % 1000); 1125 rcu_torture_writer_state = RTWS_COND_SYNC; 1126 cur_ops->cond_sync(gp_snap); 1127 rcu_torture_pipe_update(old_rp); 1128 break; 1129 case RTWS_SYNC: 1130 rcu_torture_writer_state = RTWS_SYNC; 1131 cur_ops->sync(); 1132 rcu_torture_pipe_update(old_rp); 1133 break; 1134 default: 1135 WARN_ON_ONCE(1); 1136 break; 1137 } 1138 } 1139 WRITE_ONCE(rcu_torture_current_version, 1140 rcu_torture_current_version + 1); 1141 /* Cycle through nesting levels of rcu_expedite_gp() calls. 
*/ 1142 if (can_expedite && 1143 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1144 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1145 if (expediting >= 0) 1146 rcu_expedite_gp(); 1147 else 1148 rcu_unexpedite_gp(); 1149 if (++expediting > 3) 1150 expediting = -expediting; 1151 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1152 can_expedite = !rcu_gp_is_expedited() && 1153 !rcu_gp_is_normal(); 1154 } 1155 rcu_torture_writer_state = RTWS_STUTTER; 1156 if (stutter_wait("rcu_torture_writer") && 1157 !READ_ONCE(rcu_fwd_cb_nodelay) && 1158 !cur_ops->slow_gps && 1159 !torture_must_stop() && 1160 rcu_inkernel_boot_has_ended()) 1161 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1162 if (list_empty(&rcu_tortures[i].rtort_free) && 1163 rcu_access_pointer(rcu_torture_current) != 1164 &rcu_tortures[i]) { 1165 rcu_ftrace_dump(DUMP_ALL); 1166 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1167 } 1168 } while (!torture_must_stop()); 1169 /* Reset expediting back to unexpedited. */ 1170 if (expediting > 0) 1171 expediting = -expediting; 1172 while (can_expedite && expediting++ < 0) 1173 rcu_unexpedite_gp(); 1174 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1175 if (!can_expedite) 1176 pr_alert("%s" TORTURE_FLAG 1177 " Dynamic grace-period expediting was disabled.\n", 1178 torture_type); 1179 rcu_torture_writer_state = RTWS_STOPPING; 1180 torture_kthread_stopping("rcu_torture_writer"); 1181 return 0; 1182 } 1183 1184 /* 1185 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1186 * delay between calls. 1187 */ 1188 static int 1189 rcu_torture_fakewriter(void *arg) 1190 { 1191 DEFINE_TORTURE_RANDOM(rand); 1192 1193 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1194 set_user_nice(current, MAX_NICE); 1195 1196 do { 1197 schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); 1198 udelay(torture_random(&rand) & 0x3ff); 1199 if (cur_ops->cb_barrier != NULL && 1200 torture_random(&rand) % (nfakewriters * 8) == 0) { 1201 cur_ops->cb_barrier(); 1202 } else if (gp_normal == gp_exp) { 1203 if (cur_ops->sync && torture_random(&rand) & 0x80) 1204 cur_ops->sync(); 1205 else if (cur_ops->exp_sync) 1206 cur_ops->exp_sync(); 1207 } else if (gp_normal && cur_ops->sync) { 1208 cur_ops->sync(); 1209 } else if (cur_ops->exp_sync) { 1210 cur_ops->exp_sync(); 1211 } 1212 stutter_wait("rcu_torture_fakewriter"); 1213 } while (!torture_must_stop()); 1214 1215 torture_kthread_stopping("rcu_torture_fakewriter"); 1216 return 0; 1217 } 1218 1219 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1220 { 1221 kfree(rhp); 1222 } 1223 1224 /* 1225 * Do one extension of an RCU read-side critical section using the 1226 * current reader state in readstate (set to zero for initial entry 1227 * to extended critical section), set the new state as specified by 1228 * newstate (set to zero for final exit from extended critical section), 1229 * and random-number-generator state in trsp. If this is neither the 1230 * beginning or end of the critical section and if there was actually a 1231 * change, do a ->read_delay(). 
1232 */ 1233 static void rcutorture_one_extend(int *readstate, int newstate, 1234 struct torture_random_state *trsp, 1235 struct rt_read_seg *rtrsp) 1236 { 1237 unsigned long flags; 1238 int idxnew = -1; 1239 int idxold = *readstate; 1240 int statesnew = ~*readstate & newstate; 1241 int statesold = *readstate & ~newstate; 1242 1243 WARN_ON_ONCE(idxold < 0); 1244 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); 1245 rtrsp->rt_readstate = newstate; 1246 1247 /* First, put new protection in place to avoid critical-section gap. */ 1248 if (statesnew & RCUTORTURE_RDR_BH) 1249 local_bh_disable(); 1250 if (statesnew & RCUTORTURE_RDR_IRQ) 1251 local_irq_disable(); 1252 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1253 preempt_disable(); 1254 if (statesnew & RCUTORTURE_RDR_RBH) 1255 rcu_read_lock_bh(); 1256 if (statesnew & RCUTORTURE_RDR_SCHED) 1257 rcu_read_lock_sched(); 1258 if (statesnew & RCUTORTURE_RDR_RCU) 1259 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; 1260 1261 /* Next, remove old protection, irq first due to bh conflict. */ 1262 if (statesold & RCUTORTURE_RDR_IRQ) 1263 local_irq_enable(); 1264 if (statesold & RCUTORTURE_RDR_BH) 1265 local_bh_enable(); 1266 if (statesold & RCUTORTURE_RDR_PREEMPT) 1267 preempt_enable(); 1268 if (statesold & RCUTORTURE_RDR_RBH) 1269 rcu_read_unlock_bh(); 1270 if (statesold & RCUTORTURE_RDR_SCHED) 1271 rcu_read_unlock_sched(); 1272 if (statesold & RCUTORTURE_RDR_RCU) { 1273 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); 1274 1275 if (lockit) 1276 raw_spin_lock_irqsave(¤t->pi_lock, flags); 1277 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); 1278 if (lockit) 1279 raw_spin_unlock_irqrestore(¤t->pi_lock, flags); 1280 } 1281 1282 /* Delay if neither beginning nor end and there was a change. */ 1283 if ((statesnew || statesold) && *readstate && newstate) 1284 cur_ops->read_delay(trsp, rtrsp); 1285 1286 /* Update the reader state. */ 1287 if (idxnew == -1) 1288 idxnew = idxold & ~RCUTORTURE_RDR_MASK; 1289 WARN_ON_ONCE(idxnew < 0); 1290 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); 1291 *readstate = idxnew | newstate; 1292 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); 1293 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); 1294 } 1295 1296 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1297 static int rcutorture_extend_mask_max(void) 1298 { 1299 int mask; 1300 1301 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1302 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1303 mask = mask | RCUTORTURE_RDR_RCU; 1304 return mask; 1305 } 1306 1307 /* Return a random protection state mask, but with at least one bit set. */ 1308 static int 1309 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1310 { 1311 int mask = rcutorture_extend_mask_max(); 1312 unsigned long randmask1 = torture_random(trsp) >> 8; 1313 unsigned long randmask2 = randmask1 >> 3; 1314 1315 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); 1316 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1317 if (!(randmask1 & 0x7)) 1318 mask = mask & randmask2; 1319 else 1320 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1321 /* Can't enable bh w/irq disabled. 
*/ 1322 if ((mask & RCUTORTURE_RDR_IRQ) && 1323 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || 1324 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) 1325 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1326 return mask ?: RCUTORTURE_RDR_RCU; 1327 } 1328 1329 /* 1330 * Do a randomly selected number of extensions of an existing RCU read-side 1331 * critical section. 1332 */ 1333 static struct rt_read_seg * 1334 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1335 struct rt_read_seg *rtrsp) 1336 { 1337 int i; 1338 int j; 1339 int mask = rcutorture_extend_mask_max(); 1340 1341 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1342 if (!((mask - 1) & mask)) 1343 return rtrsp; /* Current RCU reader not extendable. */ 1344 /* Bias towards larger numbers of loops. */ 1345 i = (torture_random(trsp) >> 3); 1346 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1347 for (j = 0; j < i; j++) { 1348 mask = rcutorture_extend_mask(*readstate, trsp); 1349 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1350 } 1351 return &rtrsp[j]; 1352 } 1353 1354 /* 1355 * Do one read-side critical section, returning false if there was 1356 * no data to read. Can be invoked both from process context and 1357 * from a timer handler. 1358 */ 1359 static bool rcu_torture_one_read(struct torture_random_state *trsp) 1360 { 1361 int i; 1362 unsigned long started; 1363 unsigned long completed; 1364 int newstate; 1365 struct rcu_torture *p; 1366 int pipe_count; 1367 int readstate = 0; 1368 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1369 struct rt_read_seg *rtrsp = &rtseg[0]; 1370 struct rt_read_seg *rtrsp1; 1371 unsigned long long ts; 1372 1373 newstate = rcutorture_extend_mask(readstate, trsp); 1374 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1375 started = cur_ops->get_gp_seq(); 1376 ts = rcu_trace_clock_local(); 1377 p = rcu_dereference_check(rcu_torture_current, 1378 rcu_read_lock_bh_held() || 1379 rcu_read_lock_sched_held() || 1380 srcu_read_lock_held(srcu_ctlp) || 1381 rcu_read_lock_trace_held() || 1382 torturing_tasks()); 1383 if (p == NULL) { 1384 /* Wait for rcu_torture_writer to get underway */ 1385 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1386 return false; 1387 } 1388 if (p->rtort_mbtest == 0) 1389 atomic_inc(&n_rcu_torture_mberror); 1390 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1391 preempt_disable(); 1392 pipe_count = READ_ONCE(p->rtort_pipe_count); 1393 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1394 /* Should not happen, but... */ 1395 pipe_count = RCU_TORTURE_PIPE_LEN; 1396 } 1397 completed = cur_ops->get_gp_seq(); 1398 if (pipe_count > 1) { 1399 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1400 ts, started, completed); 1401 rcu_ftrace_dump(DUMP_ALL); 1402 } 1403 __this_cpu_inc(rcu_torture_count[pipe_count]); 1404 completed = rcutorture_seq_diff(completed, started); 1405 if (completed > RCU_TORTURE_PIPE_LEN) { 1406 /* Should not happen, but... */ 1407 completed = RCU_TORTURE_PIPE_LEN; 1408 } 1409 __this_cpu_inc(rcu_torture_batch[completed]); 1410 preempt_enable(); 1411 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1412 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); 1413 1414 /* If error or close call, record the sequence of reader protections. 
*/ 1415 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1416 i = 0; 1417 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1418 err_segs[i++] = *rtrsp1; 1419 rt_read_nsegs = i; 1420 } 1421 1422 return true; 1423 } 1424 1425 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 1426 1427 /* 1428 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1429 * incrementing the corresponding element of the pipeline array. The 1430 * counter in the element should never be greater than 1, otherwise, the 1431 * RCU implementation is broken. 1432 */ 1433 static void rcu_torture_timer(struct timer_list *unused) 1434 { 1435 atomic_long_inc(&n_rcu_torture_timers); 1436 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand)); 1437 1438 /* Test call_rcu() invocation from interrupt handler. */ 1439 if (cur_ops->call) { 1440 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 1441 1442 if (rhp) 1443 cur_ops->call(rhp, rcu_torture_timer_cb); 1444 } 1445 } 1446 1447 /* 1448 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 1449 * incrementing the corresponding element of the pipeline array. The 1450 * counter in the element should never be greater than 1, otherwise, the 1451 * RCU implementation is broken. 1452 */ 1453 static int 1454 rcu_torture_reader(void *arg) 1455 { 1456 unsigned long lastsleep = jiffies; 1457 long myid = (long)arg; 1458 int mynumonline = myid; 1459 DEFINE_TORTURE_RANDOM(rand); 1460 struct timer_list t; 1461 1462 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1463 set_user_nice(current, MAX_NICE); 1464 if (irqreader && cur_ops->irq_capable) 1465 timer_setup_on_stack(&t, rcu_torture_timer, 0); 1466 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1467 do { 1468 if (irqreader && cur_ops->irq_capable) { 1469 if (!timer_pending(&t)) 1470 mod_timer(&t, jiffies + 1); 1471 } 1472 if (!rcu_torture_one_read(&rand) && !torture_must_stop()) 1473 schedule_timeout_interruptible(HZ); 1474 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 1475 schedule_timeout_interruptible(1); 1476 lastsleep = jiffies + 10; 1477 } 1478 while (num_online_cpus() < mynumonline && !torture_must_stop()) 1479 schedule_timeout_interruptible(HZ / 5); 1480 stutter_wait("rcu_torture_reader"); 1481 } while (!torture_must_stop()); 1482 if (irqreader && cur_ops->irq_capable) { 1483 del_timer_sync(&t); 1484 destroy_timer_on_stack(&t); 1485 } 1486 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1487 torture_kthread_stopping("rcu_torture_reader"); 1488 return 0; 1489 } 1490 1491 /* 1492 * Print torture statistics. Caller must ensure that there is only 1493 * one call to this function at a given time!!! This is normally 1494 * accomplished by relying on the module system to only have one copy 1495 * of the module loaded, and then by giving the rcu_torture_stats 1496 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1497 * thread is not running). 
1498 */ 1499 static void 1500 rcu_torture_stats_print(void) 1501 { 1502 int cpu; 1503 int i; 1504 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1505 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1506 struct rcu_torture *rtcp; 1507 static unsigned long rtcv_snap = ULONG_MAX; 1508 static bool splatted; 1509 struct task_struct *wtp; 1510 1511 for_each_possible_cpu(cpu) { 1512 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1513 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 1514 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 1515 } 1516 } 1517 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1518 if (pipesummary[i] != 0) 1519 break; 1520 } 1521 1522 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1523 rtcp = rcu_access_pointer(rcu_torture_current); 1524 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1525 rtcp, 1526 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 1527 rcu_torture_current_version, 1528 list_empty(&rcu_torture_freelist), 1529 atomic_read(&n_rcu_torture_alloc), 1530 atomic_read(&n_rcu_torture_alloc_fail), 1531 atomic_read(&n_rcu_torture_free)); 1532 pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ", 1533 atomic_read(&n_rcu_torture_mberror), 1534 n_rcu_torture_barrier_error, 1535 n_rcu_torture_boost_ktrerror, 1536 n_rcu_torture_boost_rterror); 1537 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1538 n_rcu_torture_boost_failure, 1539 n_rcu_torture_boosts, 1540 atomic_long_read(&n_rcu_torture_timers)); 1541 torture_onoff_stats(); 1542 pr_cont("barrier: %ld/%ld:%ld\n", 1543 data_race(n_barrier_successes), 1544 data_race(n_barrier_attempts), 1545 data_race(n_rcu_torture_barrier_error)); 1546 1547 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1548 if (atomic_read(&n_rcu_torture_mberror) || 1549 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1550 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1551 i > 1) { 1552 pr_cont("%s", "!!! "); 1553 atomic_inc(&n_rcu_torture_error); 1554 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1555 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1556 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1557 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1558 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed 1559 WARN_ON_ONCE(i > 1); // Too-short grace period 1560 } 1561 pr_cont("Reader Pipe: "); 1562 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1563 pr_cont(" %ld", pipesummary[i]); 1564 pr_cont("\n"); 1565 1566 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1567 pr_cont("Reader Batch: "); 1568 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1569 pr_cont(" %ld", batchsummary[i]); 1570 pr_cont("\n"); 1571 1572 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1573 pr_cont("Free-Block Circulation: "); 1574 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1575 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1576 } 1577 pr_cont("\n"); 1578 1579 if (cur_ops->stats) 1580 cur_ops->stats(); 1581 if (rtcv_snap == rcu_torture_current_version && 1582 rcu_access_pointer(rcu_torture_current) && 1583 !rcu_stall_is_suppressed()) { 1584 int __maybe_unused flags = 0; 1585 unsigned long __maybe_unused gp_seq = 0; 1586 1587 rcutorture_get_gp_data(cur_ops->ttype, 1588 &flags, &gp_seq); 1589 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1590 &flags, &gp_seq); 1591 wtp = READ_ONCE(writer_task); 1592 pr_alert("??? 
Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1593 rcu_torture_writer_state_getname(), 1594 rcu_torture_writer_state, gp_seq, flags, 1595 wtp == NULL ? ~0UL : wtp->state, 1596 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1597 if (!splatted && wtp) { 1598 sched_show_task(wtp); 1599 splatted = true; 1600 } 1601 show_rcu_gp_kthreads(); 1602 rcu_ftrace_dump(DUMP_ALL); 1603 } 1604 rtcv_snap = rcu_torture_current_version; 1605 } 1606 1607 /* 1608 * Periodically prints torture statistics, if periodic statistics printing 1609 * was specified via the stat_interval module parameter. 1610 */ 1611 static int 1612 rcu_torture_stats(void *arg) 1613 { 1614 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1615 do { 1616 schedule_timeout_interruptible(stat_interval * HZ); 1617 rcu_torture_stats_print(); 1618 torture_shutdown_absorb("rcu_torture_stats"); 1619 } while (!torture_must_stop()); 1620 torture_kthread_stopping("rcu_torture_stats"); 1621 return 0; 1622 } 1623 1624 static void 1625 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1626 { 1627 pr_alert("%s" TORTURE_FLAG 1628 "--- %s: nreaders=%d nfakewriters=%d " 1629 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1630 "shuffle_interval=%d stutter=%d irqreader=%d " 1631 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1632 "test_boost=%d/%d test_boost_interval=%d " 1633 "test_boost_duration=%d shutdown_secs=%d " 1634 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1635 "stall_cpu_block=%d " 1636 "n_barrier_cbs=%d " 1637 "onoff_interval=%d onoff_holdoff=%d\n", 1638 torture_type, tag, nrealreaders, nfakewriters, 1639 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1640 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1641 test_boost, cur_ops->can_boost, 1642 test_boost_interval, test_boost_duration, shutdown_secs, 1643 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1644 stall_cpu_block, 1645 n_barrier_cbs, 1646 onoff_interval, onoff_holdoff); 1647 } 1648 1649 static int rcutorture_booster_cleanup(unsigned int cpu) 1650 { 1651 struct task_struct *t; 1652 1653 if (boost_tasks[cpu] == NULL) 1654 return 0; 1655 mutex_lock(&boost_mutex); 1656 t = boost_tasks[cpu]; 1657 boost_tasks[cpu] = NULL; 1658 rcu_torture_enable_rt_throttle(); 1659 mutex_unlock(&boost_mutex); 1660 1661 /* This must be outside of the mutex, otherwise deadlock! */ 1662 torture_stop_kthread(rcu_torture_boost, t); 1663 return 0; 1664 } 1665 1666 static int rcutorture_booster_init(unsigned int cpu) 1667 { 1668 int retval; 1669 1670 if (boost_tasks[cpu] != NULL) 1671 return 0; /* Already created, nothing more to do. */ 1672 1673 /* Don't allow time recalculation while creating a new task. */ 1674 mutex_lock(&boost_mutex); 1675 rcu_torture_disable_rt_throttle(); 1676 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1677 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1678 cpu_to_node(cpu), 1679 "rcu_torture_boost"); 1680 if (IS_ERR(boost_tasks[cpu])) { 1681 retval = PTR_ERR(boost_tasks[cpu]); 1682 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 1683 n_rcu_torture_boost_ktrerror++; 1684 boost_tasks[cpu] = NULL; 1685 mutex_unlock(&boost_mutex); 1686 return retval; 1687 } 1688 kthread_bind(boost_tasks[cpu], cpu); 1689 wake_up_process(boost_tasks[cpu]); 1690 mutex_unlock(&boost_mutex); 1691 return 0; 1692 } 1693 1694 /* 1695 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1696 * induces a CPU stall for the time specified by stall_cpu. 
1697 */ 1698 static int rcu_torture_stall(void *args) 1699 { 1700 int idx; 1701 unsigned long stop_at; 1702 1703 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1704 if (stall_cpu_holdoff > 0) { 1705 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1706 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1707 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1708 } 1709 if (!kthread_should_stop() && stall_gp_kthread > 0) { 1710 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 1711 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 1712 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 1713 if (kthread_should_stop()) 1714 break; 1715 schedule_timeout_uninterruptible(HZ); 1716 } 1717 } 1718 if (!kthread_should_stop() && stall_cpu > 0) { 1719 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 1720 stop_at = ktime_get_seconds() + stall_cpu; 1721 /* RCU CPU stall is expected behavior in following code. */ 1722 idx = cur_ops->readlock(); 1723 if (stall_cpu_irqsoff) 1724 local_irq_disable(); 1725 else if (!stall_cpu_block) 1726 preempt_disable(); 1727 pr_alert("rcu_torture_stall start on CPU %d.\n", 1728 raw_smp_processor_id()); 1729 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 1730 stop_at)) 1731 if (stall_cpu_block) 1732 schedule_timeout_uninterruptible(HZ); 1733 if (stall_cpu_irqsoff) 1734 local_irq_enable(); 1735 else if (!stall_cpu_block) 1736 preempt_enable(); 1737 cur_ops->readunlock(idx); 1738 } 1739 pr_alert("rcu_torture_stall end.\n"); 1740 torture_shutdown_absorb("rcu_torture_stall"); 1741 while (!kthread_should_stop()) 1742 schedule_timeout_interruptible(10 * HZ); 1743 return 0; 1744 } 1745 1746 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1747 static int __init rcu_torture_stall_init(void) 1748 { 1749 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 1750 return 0; 1751 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 1752 } 1753 1754 /* State structure for forward-progress self-propagating RCU callback. */ 1755 struct fwd_cb_state { 1756 struct rcu_head rh; 1757 int stop; 1758 }; 1759 1760 /* 1761 * Forward-progress self-propagating RCU callback function. Because 1762 * callbacks run from softirq, this function is an implicit RCU read-side 1763 * critical section. 1764 */ 1765 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 1766 { 1767 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 1768 1769 if (READ_ONCE(fcsp->stop)) { 1770 WRITE_ONCE(fcsp->stop, 2); 1771 return; 1772 } 1773 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 1774 } 1775 1776 /* State for continuous-flood RCU callbacks. */ 1777 struct rcu_fwd_cb { 1778 struct rcu_head rh; 1779 struct rcu_fwd_cb *rfc_next; 1780 struct rcu_fwd *rfc_rfp; 1781 int rfc_gps; 1782 }; 1783 1784 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 1785 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1786 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1787 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. 
*/ 1788 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 1789 1790 struct rcu_launder_hist { 1791 long n_launders; 1792 unsigned long launder_gp_seq; 1793 }; 1794 1795 struct rcu_fwd { 1796 spinlock_t rcu_fwd_lock; 1797 struct rcu_fwd_cb *rcu_fwd_cb_head; 1798 struct rcu_fwd_cb **rcu_fwd_cb_tail; 1799 long n_launders_cb; 1800 unsigned long rcu_fwd_startat; 1801 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 1802 unsigned long rcu_launder_gp_seq_start; 1803 }; 1804 1805 static struct rcu_fwd *rcu_fwds; 1806 static bool rcu_fwd_emergency_stop; 1807 1808 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 1809 { 1810 unsigned long gps; 1811 unsigned long gps_old; 1812 int i; 1813 int j; 1814 1815 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 1816 if (rfp->n_launders_hist[i].n_launders > 0) 1817 break; 1818 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 1819 __func__, jiffies - rfp->rcu_fwd_startat); 1820 gps_old = rfp->rcu_launder_gp_seq_start; 1821 for (j = 0; j <= i; j++) { 1822 gps = rfp->n_launders_hist[j].launder_gp_seq; 1823 pr_cont(" %ds/%d: %ld:%ld", 1824 j + 1, FWD_CBS_HIST_DIV, 1825 rfp->n_launders_hist[j].n_launders, 1826 rcutorture_seq_diff(gps, gps_old)); 1827 gps_old = gps; 1828 } 1829 pr_cont("\n"); 1830 } 1831 1832 /* Callback function for continuous-flood RCU callbacks. */ 1833 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 1834 { 1835 unsigned long flags; 1836 int i; 1837 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 1838 struct rcu_fwd_cb **rfcpp; 1839 struct rcu_fwd *rfp = rfcp->rfc_rfp; 1840 1841 rfcp->rfc_next = NULL; 1842 rfcp->rfc_gps++; 1843 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1844 rfcpp = rfp->rcu_fwd_cb_tail; 1845 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 1846 WRITE_ONCE(*rfcpp, rfcp); 1847 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 1848 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 1849 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 1850 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 1851 rfp->n_launders_hist[i].n_launders++; 1852 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 1853 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1854 } 1855 1856 // Give the scheduler a chance, even on nohz_full CPUs. 1857 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 1858 { 1859 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 1860 // Real call_rcu() floods hit userspace, so emulate that. 1861 if (need_resched() || (iter & 0xfff)) 1862 schedule(); 1863 return; 1864 } 1865 // No userspace emulation: CB invocation throttles call_rcu() 1866 cond_resched(); 1867 } 1868 1869 /* 1870 * Free all callbacks on the rcu_fwd_cb_head list, either because the 1871 * test is over or because we hit an OOM event. 
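 * The return value is the number of callbacks freed; the OOM notifier
 * below prints it after each of its attempts to reclaim memory.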
1872 */ 1873 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 1874 { 1875 unsigned long flags; 1876 unsigned long freed = 0; 1877 struct rcu_fwd_cb *rfcp; 1878 1879 for (;;) { 1880 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 1881 rfcp = rfp->rcu_fwd_cb_head; 1882 if (!rfcp) { 1883 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1884 break; 1885 } 1886 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 1887 if (!rfp->rcu_fwd_cb_head) 1888 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 1889 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 1890 kfree(rfcp); 1891 freed++; 1892 rcu_torture_fwd_prog_cond_resched(freed); 1893 if (tick_nohz_full_enabled()) { 1894 local_irq_save(flags); 1895 rcu_momentary_dyntick_idle(); 1896 local_irq_restore(flags); 1897 } 1898 } 1899 return freed; 1900 } 1901 1902 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 1903 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 1904 int *tested, int *tested_tries) 1905 { 1906 unsigned long cver; 1907 unsigned long dur; 1908 struct fwd_cb_state fcs; 1909 unsigned long gps; 1910 int idx; 1911 int sd; 1912 int sd4; 1913 bool selfpropcb = false; 1914 unsigned long stopat; 1915 static DEFINE_TORTURE_RANDOM(trs); 1916 1917 if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { 1918 init_rcu_head_on_stack(&fcs.rh); 1919 selfpropcb = true; 1920 } 1921 1922 /* Tight loop containing cond_resched(). */ 1923 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1924 cur_ops->sync(); /* Later readers see above write. */ 1925 if (selfpropcb) { 1926 WRITE_ONCE(fcs.stop, 0); 1927 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 1928 } 1929 cver = READ_ONCE(rcu_torture_current_version); 1930 gps = cur_ops->get_gp_seq(); 1931 sd = cur_ops->stall_dur() + 1; 1932 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 1933 dur = sd4 + torture_random(&trs) % (sd - sd4); 1934 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1935 stopat = rfp->rcu_fwd_startat + dur; 1936 while (time_before(jiffies, stopat) && 1937 !shutdown_time_arrived() && 1938 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1939 idx = cur_ops->readlock(); 1940 udelay(10); 1941 cur_ops->readunlock(idx); 1942 if (!fwd_progress_need_resched || need_resched()) 1943 cond_resched(); 1944 } 1945 (*tested_tries)++; 1946 if (!time_before(jiffies, stopat) && 1947 !shutdown_time_arrived() && 1948 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 1949 (*tested)++; 1950 cver = READ_ONCE(rcu_torture_current_version) - cver; 1951 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 1952 WARN_ON(!cver && gps < 2); 1953 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 1954 } 1955 if (selfpropcb) { 1956 WRITE_ONCE(fcs.stop, 1); 1957 cur_ops->sync(); /* Wait for running CB to complete. */ 1958 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 1959 } 1960 1961 if (selfpropcb) { 1962 WARN_ON(READ_ONCE(fcs.stop) != 2); 1963 destroy_rcu_head_on_stack(&fcs.rh); 1964 } 1965 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 1966 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 1967 } 1968 1969 /* Carry out call_rcu() forward-progress testing. 
*/ 1970 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 1971 { 1972 unsigned long cver; 1973 unsigned long flags; 1974 unsigned long gps; 1975 int i; 1976 long n_launders; 1977 long n_launders_cb_snap; 1978 long n_launders_sa; 1979 long n_max_cbs; 1980 long n_max_gps; 1981 struct rcu_fwd_cb *rfcp; 1982 struct rcu_fwd_cb *rfcpn; 1983 unsigned long stopat; 1984 unsigned long stoppedat; 1985 1986 if (READ_ONCE(rcu_fwd_emergency_stop)) 1987 return; /* Get out of the way quickly, no GP wait! */ 1988 if (!cur_ops->call) 1989 return; /* Can't do call_rcu() fwd prog without ->call. */ 1990 1991 /* Loop continuously posting RCU callbacks. */ 1992 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 1993 cur_ops->sync(); /* Later readers see above write. */ 1994 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 1995 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 1996 n_launders = 0; 1997 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 1998 n_launders_sa = 0; 1999 n_max_cbs = 0; 2000 n_max_gps = 0; 2001 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2002 rfp->n_launders_hist[i].n_launders = 0; 2003 cver = READ_ONCE(rcu_torture_current_version); 2004 gps = cur_ops->get_gp_seq(); 2005 rfp->rcu_launder_gp_seq_start = gps; 2006 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2007 while (time_before(jiffies, stopat) && 2008 !shutdown_time_arrived() && 2009 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2010 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2011 rfcpn = NULL; 2012 if (rfcp) 2013 rfcpn = READ_ONCE(rfcp->rfc_next); 2014 if (rfcpn) { 2015 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2016 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2017 break; 2018 rfp->rcu_fwd_cb_head = rfcpn; 2019 n_launders++; 2020 n_launders_sa++; 2021 } else { 2022 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2023 if (WARN_ON_ONCE(!rfcp)) { 2024 schedule_timeout_interruptible(1); 2025 continue; 2026 } 2027 n_max_cbs++; 2028 n_launders_sa = 0; 2029 rfcp->rfc_gps = 0; 2030 rfcp->rfc_rfp = rfp; 2031 } 2032 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2033 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2034 if (tick_nohz_full_enabled()) { 2035 local_irq_save(flags); 2036 rcu_momentary_dyntick_idle(); 2037 local_irq_restore(flags); 2038 } 2039 } 2040 stoppedat = jiffies; 2041 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2042 cver = READ_ONCE(rcu_torture_current_version) - cver; 2043 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2044 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2045 (void)rcu_torture_fwd_prog_cbfree(rfp); 2046 2047 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2048 !shutdown_time_arrived()) { 2049 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2050 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2051 __func__, 2052 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2053 n_launders + n_max_cbs - n_launders_cb_snap, 2054 n_launders, n_launders_sa, 2055 n_max_gps, n_max_cbs, cver, gps); 2056 rcu_torture_fwd_cb_hist(rfp); 2057 } 2058 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2059 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2060 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2061 } 2062 2063 2064 /* 2065 * OOM notifier, but this only prints diagnostic information for the 2066 * current forward-progress test. 
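 * It also sets rcu_fwd_emergency_stop so that rcu_torture_fwd_prog_cr()
 * above bails out promptly, and frees the flooded callbacks in an
 * attempt to hand some memory back to the OOM handler.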
2067 */ 2068 static int rcutorture_oom_notify(struct notifier_block *self, 2069 unsigned long notused, void *nfreed) 2070 { 2071 struct rcu_fwd *rfp = rcu_fwds; 2072 2073 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2074 __func__); 2075 rcu_torture_fwd_cb_hist(rfp); 2076 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2077 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2078 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2079 pr_info("%s: Freed %lu RCU callbacks.\n", 2080 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2081 rcu_barrier(); 2082 pr_info("%s: Freed %lu RCU callbacks.\n", 2083 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2084 rcu_barrier(); 2085 pr_info("%s: Freed %lu RCU callbacks.\n", 2086 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2087 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2088 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2089 pr_info("%s returning after OOM processing.\n", __func__); 2090 return NOTIFY_OK; 2091 } 2092 2093 static struct notifier_block rcutorture_oom_nb = { 2094 .notifier_call = rcutorture_oom_notify 2095 }; 2096 2097 /* Carry out grace-period forward-progress testing. */ 2098 static int rcu_torture_fwd_prog(void *args) 2099 { 2100 struct rcu_fwd *rfp = args; 2101 int tested = 0; 2102 int tested_tries = 0; 2103 2104 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2105 rcu_bind_current_to_nocb(); 2106 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2107 set_user_nice(current, MAX_NICE); 2108 do { 2109 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2110 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2111 register_oom_notifier(&rcutorture_oom_nb); 2112 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2113 rcu_inkernel_boot_has_ended()) 2114 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2115 if (rcu_inkernel_boot_has_ended()) 2116 rcu_torture_fwd_prog_cr(rfp); 2117 unregister_oom_notifier(&rcutorture_oom_nb); 2118 2119 /* Avoid slow periods, better to test when busy. */ 2120 stutter_wait("rcu_torture_fwd_prog"); 2121 } while (!torture_must_stop()); 2122 /* Short runs might not contain a valid forward-progress attempt. */ 2123 WARN_ON(!tested && tested_tries >= 5); 2124 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2125 torture_kthread_stopping("rcu_torture_fwd_prog"); 2126 return 0; 2127 } 2128 2129 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2130 static int __init rcu_torture_fwd_prog_init(void) 2131 { 2132 struct rcu_fwd *rfp; 2133 2134 if (!fwd_progress) 2135 return 0; /* Not requested, so don't do it. */ 2136 if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || 2137 cur_ops == &rcu_busted_ops) { 2138 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2139 return 0; 2140 } 2141 if (stall_cpu > 0) { 2142 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2143 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) 2144 return -EINVAL; /* In module, can fail back to user. */ 2145 WARN_ON(1); /* Make sure rcutorture notices conflict. 
*/ 2146 return 0; 2147 } 2148 if (fwd_progress_holdoff <= 0) 2149 fwd_progress_holdoff = 1; 2150 if (fwd_progress_div <= 0) 2151 fwd_progress_div = 4; 2152 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2153 if (!rfp) 2154 return -ENOMEM; 2155 spin_lock_init(&rfp->rcu_fwd_lock); 2156 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2157 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2158 } 2159 2160 /* Callback function for RCU barrier testing. */ 2161 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2162 { 2163 atomic_inc(&barrier_cbs_invoked); 2164 } 2165 2166 /* IPI handler to get callback posted on desired CPU, if online. */ 2167 static void rcu_torture_barrier1cb(void *rcu_void) 2168 { 2169 struct rcu_head *rhp = rcu_void; 2170 2171 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2172 } 2173 2174 /* kthread function to register callbacks used to test RCU barriers. */ 2175 static int rcu_torture_barrier_cbs(void *arg) 2176 { 2177 long myid = (long)arg; 2178 bool lastphase = 0; 2179 bool newphase; 2180 struct rcu_head rcu; 2181 2182 init_rcu_head_on_stack(&rcu); 2183 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2184 set_user_nice(current, MAX_NICE); 2185 do { 2186 wait_event(barrier_cbs_wq[myid], 2187 (newphase = 2188 smp_load_acquire(&barrier_phase)) != lastphase || 2189 torture_must_stop()); 2190 lastphase = newphase; 2191 if (torture_must_stop()) 2192 break; 2193 /* 2194 * The above smp_load_acquire() ensures barrier_phase load 2195 * is ordered before the following ->call(). 2196 */ 2197 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2198 &rcu, 1)) { 2199 // IPI failed, so use direct call from current CPU. 2200 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2201 } 2202 if (atomic_dec_and_test(&barrier_cbs_count)) 2203 wake_up(&barrier_wq); 2204 } while (!torture_must_stop()); 2205 if (cur_ops->cb_barrier != NULL) 2206 cur_ops->cb_barrier(); 2207 destroy_rcu_head_on_stack(&rcu); 2208 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2209 return 0; 2210 } 2211 2212 /* kthread function to drive and coordinate RCU barrier testing. */ 2213 static int rcu_torture_barrier(void *arg) 2214 { 2215 int i; 2216 2217 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2218 do { 2219 atomic_set(&barrier_cbs_invoked, 0); 2220 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2221 /* Ensure barrier_phase ordered after prior assignments. */ 2222 smp_store_release(&barrier_phase, !barrier_phase); 2223 for (i = 0; i < n_barrier_cbs; i++) 2224 wake_up(&barrier_cbs_wq[i]); 2225 wait_event(barrier_wq, 2226 atomic_read(&barrier_cbs_count) == 0 || 2227 torture_must_stop()); 2228 if (torture_must_stop()) 2229 break; 2230 n_barrier_attempts++; 2231 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2232 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2233 n_rcu_torture_barrier_error++; 2234 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2235 atomic_read(&barrier_cbs_invoked), 2236 n_barrier_cbs); 2237 WARN_ON(1); 2238 // Wait manually for the remaining callbacks 2239 i = 0; 2240 do { 2241 if (WARN_ON(i++ > HZ)) 2242 i = INT_MIN; 2243 schedule_timeout_interruptible(1); 2244 cur_ops->cb_barrier(); 2245 } while (atomic_read(&barrier_cbs_invoked) != 2246 n_barrier_cbs && 2247 !torture_must_stop()); 2248 smp_mb(); // Can't trust ordering if broken. 
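// At this point the recovery loop above has either seen all n_barrier_cbs callbacks invoked or been asked to stop.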
2249 if (!torture_must_stop()) 2250 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2251 atomic_read(&barrier_cbs_invoked)); 2252 } else { 2253 n_barrier_successes++; 2254 } 2255 schedule_timeout_interruptible(HZ / 10); 2256 } while (!torture_must_stop()); 2257 torture_kthread_stopping("rcu_torture_barrier"); 2258 return 0; 2259 } 2260 2261 /* Initialize RCU barrier testing. */ 2262 static int rcu_torture_barrier_init(void) 2263 { 2264 int i; 2265 int ret; 2266 2267 if (n_barrier_cbs <= 0) 2268 return 0; 2269 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2270 pr_alert("%s" TORTURE_FLAG 2271 " Call or barrier ops missing for %s,\n", 2272 torture_type, cur_ops->name); 2273 pr_alert("%s" TORTURE_FLAG 2274 " RCU barrier testing omitted from run.\n", 2275 torture_type); 2276 return 0; 2277 } 2278 atomic_set(&barrier_cbs_count, 0); 2279 atomic_set(&barrier_cbs_invoked, 0); 2280 barrier_cbs_tasks = 2281 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2282 GFP_KERNEL); 2283 barrier_cbs_wq = 2284 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2285 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2286 return -ENOMEM; 2287 for (i = 0; i < n_barrier_cbs; i++) { 2288 init_waitqueue_head(&barrier_cbs_wq[i]); 2289 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2290 (void *)(long)i, 2291 barrier_cbs_tasks[i]); 2292 if (ret) 2293 return ret; 2294 } 2295 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2296 } 2297 2298 /* Clean up after RCU barrier testing. */ 2299 static void rcu_torture_barrier_cleanup(void) 2300 { 2301 int i; 2302 2303 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2304 if (barrier_cbs_tasks != NULL) { 2305 for (i = 0; i < n_barrier_cbs; i++) 2306 torture_stop_kthread(rcu_torture_barrier_cbs, 2307 barrier_cbs_tasks[i]); 2308 kfree(barrier_cbs_tasks); 2309 barrier_cbs_tasks = NULL; 2310 } 2311 if (barrier_cbs_wq != NULL) { 2312 kfree(barrier_cbs_wq); 2313 barrier_cbs_wq = NULL; 2314 } 2315 } 2316 2317 static bool rcu_torture_can_boost(void) 2318 { 2319 static int boost_warn_once; 2320 int prio; 2321 2322 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2323 return false; 2324 2325 prio = rcu_get_gp_kthreads_prio(); 2326 if (!prio) 2327 return false; 2328 2329 if (prio < 2) { 2330 if (boost_warn_once == 1) 2331 return false; 2332 2333 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. 
Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2334 boost_warn_once = 1; 2335 return false; 2336 } 2337 2338 return true; 2339 } 2340 2341 static enum cpuhp_state rcutor_hp; 2342 2343 static void 2344 rcu_torture_cleanup(void) 2345 { 2346 int firsttime; 2347 int flags = 0; 2348 unsigned long gp_seq = 0; 2349 int i; 2350 2351 if (torture_cleanup_begin()) { 2352 if (cur_ops->cb_barrier != NULL) 2353 cur_ops->cb_barrier(); 2354 return; 2355 } 2356 if (!cur_ops) { 2357 torture_cleanup_end(); 2358 return; 2359 } 2360 2361 show_rcu_gp_kthreads(); 2362 rcu_torture_barrier_cleanup(); 2363 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2364 torture_stop_kthread(rcu_torture_stall, stall_task); 2365 torture_stop_kthread(rcu_torture_writer, writer_task); 2366 2367 if (reader_tasks) { 2368 for (i = 0; i < nrealreaders; i++) 2369 torture_stop_kthread(rcu_torture_reader, 2370 reader_tasks[i]); 2371 kfree(reader_tasks); 2372 } 2373 rcu_torture_current = NULL; 2374 2375 if (fakewriter_tasks) { 2376 for (i = 0; i < nfakewriters; i++) { 2377 torture_stop_kthread(rcu_torture_fakewriter, 2378 fakewriter_tasks[i]); 2379 } 2380 kfree(fakewriter_tasks); 2381 fakewriter_tasks = NULL; 2382 } 2383 2384 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2385 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2386 pr_alert("%s: End-test grace-period state: g%lu f%#x\n", 2387 cur_ops->name, gp_seq, flags); 2388 torture_stop_kthread(rcu_torture_stats, stats_task); 2389 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2390 if (rcu_torture_can_boost()) 2391 cpuhp_remove_state(rcutor_hp); 2392 2393 /* 2394 * Wait for all RCU callbacks to fire, then do torture-type-specific 2395 * cleanup operations. 2396 */ 2397 if (cur_ops->cb_barrier != NULL) 2398 cur_ops->cb_barrier(); 2399 if (cur_ops->cleanup != NULL) 2400 cur_ops->cleanup(); 2401 2402 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2403 2404 if (err_segs_recorded) { 2405 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2406 if (rt_read_nsegs == 0) 2407 pr_alert("\t: No segments recorded!!!\n"); 2408 firsttime = 1; 2409 for (i = 0; i < rt_read_nsegs; i++) { 2410 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2411 if (err_segs[i].rt_delay_jiffies != 0) { 2412 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2413 err_segs[i].rt_delay_jiffies); 2414 firsttime = 0; 2415 } 2416 if (err_segs[i].rt_delay_ms != 0) { 2417 pr_cont("%s%ldms", firsttime ? "" : "+", 2418 err_segs[i].rt_delay_ms); 2419 firsttime = 0; 2420 } 2421 if (err_segs[i].rt_delay_us != 0) { 2422 pr_cont("%s%ldus", firsttime ? "" : "+", 2423 err_segs[i].rt_delay_us); 2424 firsttime = 0; 2425 } 2426 pr_cont("%s\n", 2427 err_segs[i].rt_preempted ? "preempted" : ""); 2428 2429 } 2430 } 2431 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2432 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2433 else if (torture_onoff_failures()) 2434 rcu_torture_print_module_parms(cur_ops, 2435 "End of test: RCU_HOTPLUG"); 2436 else 2437 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2438 torture_cleanup_end(); 2439 } 2440 2441 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2442 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2443 { 2444 } 2445 2446 static void rcu_torture_err_cb(struct rcu_head *rhp) 2447 { 2448 /* 2449 * This -might- happen due to race conditions, but is unlikely. 
2450 * The scenario that leads to this happening is that the 2451 * first of the pair of duplicate callbacks is queued, 2452 * someone else starts a grace period that includes that 2453 * callback, then the second of the pair must wait for the 2454 * next grace period. Unlikely, but can happen. If it 2455 * does happen, the debug-objects subsystem won't have splatted. 2456 */ 2457 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2458 } 2459 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2460 2461 /* 2462 * Verify that double-free causes debug-objects to complain, but only 2463 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2464 * cannot be carried out. 2465 */ 2466 static void rcu_test_debug_objects(void) 2467 { 2468 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2469 struct rcu_head rh1; 2470 struct rcu_head rh2; 2471 2472 init_rcu_head_on_stack(&rh1); 2473 init_rcu_head_on_stack(&rh2); 2474 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2475 2476 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2477 preempt_disable(); /* Prevent preemption from interrupting test. */ 2478 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2479 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2480 local_irq_disable(); /* Make it harder to start a new grace period. */ 2481 call_rcu(&rh2, rcu_torture_leak_cb); 2482 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2483 local_irq_enable(); 2484 rcu_read_unlock(); 2485 preempt_enable(); 2486 2487 /* Wait for them all to get done so we can safely return. */ 2488 rcu_barrier(); 2489 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2490 destroy_rcu_head_on_stack(&rh1); 2491 destroy_rcu_head_on_stack(&rh2); 2492 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2493 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2494 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2495 } 2496 2497 static void rcutorture_sync(void) 2498 { 2499 static unsigned long n; 2500 2501 if (cur_ops->sync && !(++n & 0xfff)) 2502 cur_ops->sync(); 2503 } 2504 2505 static int __init 2506 rcu_torture_init(void) 2507 { 2508 long i; 2509 int cpu; 2510 int firsterr = 0; 2511 static struct rcu_torture_ops *torture_ops[] = { 2512 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2513 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, 2514 &tasks_tracing_ops, &trivial_ops, 2515 }; 2516 2517 if (!torture_init_begin(torture_type, verbose)) 2518 return -EBUSY; 2519 2520 /* Process args and tell the world that the torturer is on the job. 
*/ 2521 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2522 cur_ops = torture_ops[i]; 2523 if (strcmp(torture_type, cur_ops->name) == 0) 2524 break; 2525 } 2526 if (i == ARRAY_SIZE(torture_ops)) { 2527 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2528 torture_type); 2529 pr_alert("rcu-torture types:"); 2530 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2531 pr_cont(" %s", torture_ops[i]->name); 2532 pr_cont("\n"); 2533 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 2534 firsterr = -EINVAL; 2535 cur_ops = NULL; 2536 goto unwind; 2537 } 2538 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2539 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2540 fqs_duration = 0; 2541 } 2542 if (cur_ops->init) 2543 cur_ops->init(); 2544 2545 if (nreaders >= 0) { 2546 nrealreaders = nreaders; 2547 } else { 2548 nrealreaders = num_online_cpus() - 2 - nreaders; 2549 if (nrealreaders <= 0) 2550 nrealreaders = 1; 2551 } 2552 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2553 2554 /* Set up the freelist. */ 2555 2556 INIT_LIST_HEAD(&rcu_torture_freelist); 2557 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2558 rcu_tortures[i].rtort_mbtest = 0; 2559 list_add_tail(&rcu_tortures[i].rtort_free, 2560 &rcu_torture_freelist); 2561 } 2562 2563 /* Initialize the statistics so that each run gets its own numbers. */ 2564 2565 rcu_torture_current = NULL; 2566 rcu_torture_current_version = 0; 2567 atomic_set(&n_rcu_torture_alloc, 0); 2568 atomic_set(&n_rcu_torture_alloc_fail, 0); 2569 atomic_set(&n_rcu_torture_free, 0); 2570 atomic_set(&n_rcu_torture_mberror, 0); 2571 atomic_set(&n_rcu_torture_error, 0); 2572 n_rcu_torture_barrier_error = 0; 2573 n_rcu_torture_boost_ktrerror = 0; 2574 n_rcu_torture_boost_rterror = 0; 2575 n_rcu_torture_boost_failure = 0; 2576 n_rcu_torture_boosts = 0; 2577 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2578 atomic_set(&rcu_torture_wcount[i], 0); 2579 for_each_possible_cpu(cpu) { 2580 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2581 per_cpu(rcu_torture_count, cpu)[i] = 0; 2582 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2583 } 2584 } 2585 err_segs_recorded = 0; 2586 rt_read_nsegs = 0; 2587 2588 /* Start up the kthreads. 
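 * The writer kthread comes first, then any fakewriters and readers,
 * followed by the optional stats, shuffle, stutter, fqs, boost,
 * shutdown, onoff, stall, forward-progress, and barrier facilities;
 * any failure branches to the unwind label for cleanup.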
*/ 2589 2590 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2591 writer_task); 2592 if (firsterr) 2593 goto unwind; 2594 if (nfakewriters > 0) { 2595 fakewriter_tasks = kcalloc(nfakewriters, 2596 sizeof(fakewriter_tasks[0]), 2597 GFP_KERNEL); 2598 if (fakewriter_tasks == NULL) { 2599 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2600 firsterr = -ENOMEM; 2601 goto unwind; 2602 } 2603 } 2604 for (i = 0; i < nfakewriters; i++) { 2605 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2606 NULL, fakewriter_tasks[i]); 2607 if (firsterr) 2608 goto unwind; 2609 } 2610 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2611 GFP_KERNEL); 2612 if (reader_tasks == NULL) { 2613 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2614 firsterr = -ENOMEM; 2615 goto unwind; 2616 } 2617 for (i = 0; i < nrealreaders; i++) { 2618 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2619 reader_tasks[i]); 2620 if (firsterr) 2621 goto unwind; 2622 } 2623 if (stat_interval > 0) { 2624 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2625 stats_task); 2626 if (firsterr) 2627 goto unwind; 2628 } 2629 if (test_no_idle_hz && shuffle_interval > 0) { 2630 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2631 if (firsterr) 2632 goto unwind; 2633 } 2634 if (stutter < 0) 2635 stutter = 0; 2636 if (stutter) { 2637 int t; 2638 2639 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2640 firsterr = torture_stutter_init(stutter * HZ, t); 2641 if (firsterr) 2642 goto unwind; 2643 } 2644 if (fqs_duration < 0) 2645 fqs_duration = 0; 2646 if (fqs_duration) { 2647 /* Create the fqs thread */ 2648 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2649 fqs_task); 2650 if (firsterr) 2651 goto unwind; 2652 } 2653 if (test_boost_interval < 1) 2654 test_boost_interval = 1; 2655 if (test_boost_duration < 2) 2656 test_boost_duration = 2; 2657 if (rcu_torture_can_boost()) { 2658 2659 boost_starttime = jiffies + test_boost_interval * HZ; 2660 2661 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2662 rcutorture_booster_init, 2663 rcutorture_booster_cleanup); 2664 if (firsterr < 0) 2665 goto unwind; 2666 rcutor_hp = firsterr; 2667 } 2668 shutdown_jiffies = jiffies + shutdown_secs * HZ; 2669 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 2670 if (firsterr) 2671 goto unwind; 2672 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 2673 rcutorture_sync); 2674 if (firsterr) 2675 goto unwind; 2676 firsterr = rcu_torture_stall_init(); 2677 if (firsterr) 2678 goto unwind; 2679 firsterr = rcu_torture_fwd_prog_init(); 2680 if (firsterr) 2681 goto unwind; 2682 firsterr = rcu_torture_barrier_init(); 2683 if (firsterr) 2684 goto unwind; 2685 if (object_debug) 2686 rcu_test_debug_objects(); 2687 torture_init_end(); 2688 return 0; 2689 2690 unwind: 2691 torture_init_end(); 2692 rcu_torture_cleanup(); 2693 return firsterr; 2694 } 2695 2696 module_init(rcu_torture_init); 2697 module_exit(rcu_torture_cleanup); 2698
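/*
 * Example usage (illustrative values, not taken from this file): when built
 * as a module, something like
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * starts the test, and "rmmod rcutorture" ends it, at which point
 * rcu_torture_cleanup() prints the final statistics and the
 * "End of test: SUCCESS" or "End of test: FAILURE" verdict.  When rcutorture
 * is built in, the same parameters can be given on the kernel command line
 * with an "rcutorture." prefix, for example rcutorture.shutdown_secs=600 to
 * shut the system down after ten minutes of testing.
 */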