1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Read-Copy Update module-based torture test facility 4 * 5 * Copyright (C) IBM Corporation, 2005, 2006 6 * 7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com> 8 * Josh Triplett <josh@joshtriplett.org> 9 * 10 * See also: Documentation/RCU/torture.rst 11 */ 12 13 #define pr_fmt(fmt) fmt 14 15 #include <linux/types.h> 16 #include <linux/kernel.h> 17 #include <linux/init.h> 18 #include <linux/module.h> 19 #include <linux/kthread.h> 20 #include <linux/err.h> 21 #include <linux/spinlock.h> 22 #include <linux/smp.h> 23 #include <linux/rcupdate_wait.h> 24 #include <linux/interrupt.h> 25 #include <linux/sched/signal.h> 26 #include <uapi/linux/sched/types.h> 27 #include <linux/atomic.h> 28 #include <linux/bitops.h> 29 #include <linux/completion.h> 30 #include <linux/moduleparam.h> 31 #include <linux/percpu.h> 32 #include <linux/notifier.h> 33 #include <linux/reboot.h> 34 #include <linux/freezer.h> 35 #include <linux/cpu.h> 36 #include <linux/delay.h> 37 #include <linux/stat.h> 38 #include <linux/srcu.h> 39 #include <linux/slab.h> 40 #include <linux/trace_clock.h> 41 #include <asm/byteorder.h> 42 #include <linux/torture.h> 43 #include <linux/vmalloc.h> 44 #include <linux/sched/debug.h> 45 #include <linux/sched/sysctl.h> 46 #include <linux/oom.h> 47 #include <linux/tick.h> 48 #include <linux/rcupdate_trace.h> 49 50 #include "rcu.h" 51 52 MODULE_LICENSE("GPL"); 53 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>"); 54 55 /* Bits for ->extendables field, extendables param, and related definitions. */ 56 #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */ 57 #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1) 58 #define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */ 59 #define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */ 60 #define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */ 61 #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */ 62 #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */ 63 #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */ 64 #define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */ 65 #define RCUTORTURE_MAX_EXTEND \ 66 (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \ 67 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED) 68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */ 69 /* Must be power of two minus one. */ 70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3) 71 72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND, 73 "Extend readers by disabling bh (1), irqs (2), or preempt (4)"); 74 torture_param(int, fqs_duration, 0, 75 "Duration of fqs bursts (us), 0 to disable"); 76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); 77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); 78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress"); 79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait"); 80 torture_param(int, fwd_progress_holdoff, 60, 81 "Time between forward-progress tests (s)"); 82 torture_param(bool, fwd_progress_need_resched, 1, 83 "Hide cond_resched() behind need_resched()"); 84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives"); 85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); 86 torture_param(bool, gp_normal, false, 87 "Use normal (non-expedited) GP wait primitives"); 88 torture_param(bool, gp_poll, false, "Use polling GP wait primitives"); 89 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives"); 90 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); 91 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers"); 92 torture_param(int, n_barrier_cbs, 0, 93 "# of callbacks/kthreads for barrier testing"); 94 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); 95 torture_param(int, nreaders, -1, "Number of RCU reader threads"); 96 torture_param(int, object_debug, 0, 97 "Enable debug-object double call_rcu() testing"); 98 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 99 torture_param(int, onoff_interval, 0, 100 "Time between CPU hotplugs (jiffies), 0=disable"); 101 torture_param(int, read_exit_delay, 13, 102 "Delay between read-then-exit episodes (s)"); 103 torture_param(int, read_exit_burst, 16, 104 "# of read-then-exit bursts per episode, zero to disable"); 105 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); 106 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); 107 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); 108 torture_param(int, stall_cpu_holdoff, 10, 109 "Time to wait before starting stall (s)."); 110 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling."); 111 torture_param(int, stall_cpu_block, 0, "Sleep while stalling."); 112 torture_param(int, stall_gp_kthread, 0, 113 "Grace-period kthread stall duration (s)."); 114 torture_param(int, stat_interval, 60, 115 "Number of seconds between stats printk()s"); 116 torture_param(int, stutter, 5, "Number of seconds to run/halt test"); 117 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); 118 torture_param(int, test_boost_duration, 4, 119 "Duration of each boost test, seconds."); 120 torture_param(int, test_boost_interval, 7, 121 "Interval between boost tests, seconds."); 122 torture_param(bool, test_no_idle_hz, true, 123 "Test support for tickless idle CPUs"); 124 torture_param(int, verbose, 1, 125 "Enable verbose debugging printk()s"); 126 127 static char *torture_type = "rcu"; 128 module_param(torture_type, charp, 0444); 129 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)"); 130 131 static int nrealreaders; 132 static struct task_struct *writer_task; 133 static struct task_struct **fakewriter_tasks; 134 static struct task_struct **reader_tasks; 135 static struct task_struct *stats_task; 136 static struct task_struct *fqs_task; 137 static struct task_struct *boost_tasks[NR_CPUS]; 138 static struct task_struct *stall_task; 139 static struct task_struct *fwd_prog_task; 140 static struct task_struct **barrier_cbs_tasks; 141 static struct task_struct *barrier_task; 142 static struct task_struct *read_exit_task; 143 144 #define RCU_TORTURE_PIPE_LEN 10 145 146 // Mailbox-like structure to check RCU global memory ordering. 147 struct rcu_torture_reader_check { 148 unsigned long rtc_myloops; 149 int rtc_chkrdr; 150 unsigned long rtc_chkloops; 151 int rtc_ready; 152 struct rcu_torture_reader_check *rtc_assigner; 153 } ____cacheline_internodealigned_in_smp; 154 155 // Update-side data structure used to check RCU readers. 156 struct rcu_torture { 157 struct rcu_head rtort_rcu; 158 int rtort_pipe_count; 159 struct list_head rtort_free; 160 int rtort_mbtest; 161 struct rcu_torture_reader_check *rtort_chkp; 162 }; 163 164 static LIST_HEAD(rcu_torture_freelist); 165 static struct rcu_torture __rcu *rcu_torture_current; 166 static unsigned long rcu_torture_current_version; 167 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; 168 static DEFINE_SPINLOCK(rcu_torture_lock); 169 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count); 170 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch); 171 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 172 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk; 173 static atomic_t n_rcu_torture_alloc; 174 static atomic_t n_rcu_torture_alloc_fail; 175 static atomic_t n_rcu_torture_free; 176 static atomic_t n_rcu_torture_mberror; 177 static atomic_t n_rcu_torture_mbchk_fail; 178 static atomic_t n_rcu_torture_mbchk_tries; 179 static atomic_t n_rcu_torture_error; 180 static long n_rcu_torture_barrier_error; 181 static long n_rcu_torture_boost_ktrerror; 182 static long n_rcu_torture_boost_rterror; 183 static long n_rcu_torture_boost_failure; 184 static long n_rcu_torture_boosts; 185 static atomic_long_t n_rcu_torture_timers; 186 static long n_barrier_attempts; 187 static long n_barrier_successes; /* did rcu_barrier test succeed? */ 188 static unsigned long n_read_exits; 189 static struct list_head rcu_torture_removed; 190 static unsigned long shutdown_jiffies; 191 static unsigned long start_gp_seq; 192 193 static int rcu_torture_writer_state; 194 #define RTWS_FIXED_DELAY 0 195 #define RTWS_DELAY 1 196 #define RTWS_REPLACE 2 197 #define RTWS_DEF_FREE 3 198 #define RTWS_EXP_SYNC 4 199 #define RTWS_COND_GET 5 200 #define RTWS_COND_SYNC 6 201 #define RTWS_POLL_GET 7 202 #define RTWS_POLL_WAIT 8 203 #define RTWS_SYNC 9 204 #define RTWS_STUTTER 10 205 #define RTWS_STOPPING 11 206 static const char * const rcu_torture_writer_state_names[] = { 207 "RTWS_FIXED_DELAY", 208 "RTWS_DELAY", 209 "RTWS_REPLACE", 210 "RTWS_DEF_FREE", 211 "RTWS_EXP_SYNC", 212 "RTWS_COND_GET", 213 "RTWS_COND_SYNC", 214 "RTWS_POLL_GET", 215 "RTWS_POLL_WAIT", 216 "RTWS_SYNC", 217 "RTWS_STUTTER", 218 "RTWS_STOPPING", 219 }; 220 221 /* Record reader segment types and duration for first failing read. */ 222 struct rt_read_seg { 223 int rt_readstate; 224 unsigned long rt_delay_jiffies; 225 unsigned long rt_delay_ms; 226 unsigned long rt_delay_us; 227 bool rt_preempted; 228 }; 229 static int err_segs_recorded; 230 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS]; 231 static int rt_read_nsegs; 232 233 static const char *rcu_torture_writer_state_getname(void) 234 { 235 unsigned int i = READ_ONCE(rcu_torture_writer_state); 236 237 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names)) 238 return "???"; 239 return rcu_torture_writer_state_names[i]; 240 } 241 242 #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) 243 #define rcu_can_boost() 1 244 #else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 245 #define rcu_can_boost() 0 246 #endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */ 247 248 #ifdef CONFIG_RCU_TRACE 249 static u64 notrace rcu_trace_clock_local(void) 250 { 251 u64 ts = trace_clock_local(); 252 253 (void)do_div(ts, NSEC_PER_USEC); 254 return ts; 255 } 256 #else /* #ifdef CONFIG_RCU_TRACE */ 257 static u64 notrace rcu_trace_clock_local(void) 258 { 259 return 0ULL; 260 } 261 #endif /* #else #ifdef CONFIG_RCU_TRACE */ 262 263 /* 264 * Stop aggressive CPU-hog tests a bit before the end of the test in order 265 * to avoid interfering with test shutdown. 266 */ 267 static bool shutdown_time_arrived(void) 268 { 269 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ); 270 } 271 272 static unsigned long boost_starttime; /* jiffies of next boost test start. */ 273 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ 274 /* and boost task create/destroy. */ 275 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */ 276 static bool barrier_phase; /* Test phase. */ 277 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ 278 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ 279 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); 280 281 static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */ 282 283 /* 284 * Allocate an element from the rcu_tortures pool. 285 */ 286 static struct rcu_torture * 287 rcu_torture_alloc(void) 288 { 289 struct list_head *p; 290 291 spin_lock_bh(&rcu_torture_lock); 292 if (list_empty(&rcu_torture_freelist)) { 293 atomic_inc(&n_rcu_torture_alloc_fail); 294 spin_unlock_bh(&rcu_torture_lock); 295 return NULL; 296 } 297 atomic_inc(&n_rcu_torture_alloc); 298 p = rcu_torture_freelist.next; 299 list_del_init(p); 300 spin_unlock_bh(&rcu_torture_lock); 301 return container_of(p, struct rcu_torture, rtort_free); 302 } 303 304 /* 305 * Free an element to the rcu_tortures pool. 306 */ 307 static void 308 rcu_torture_free(struct rcu_torture *p) 309 { 310 atomic_inc(&n_rcu_torture_free); 311 spin_lock_bh(&rcu_torture_lock); 312 list_add_tail(&p->rtort_free, &rcu_torture_freelist); 313 spin_unlock_bh(&rcu_torture_lock); 314 } 315 316 /* 317 * Operations vector for selecting different types of tests. 318 */ 319 320 struct rcu_torture_ops { 321 int ttype; 322 void (*init)(void); 323 void (*cleanup)(void); 324 int (*readlock)(void); 325 void (*read_delay)(struct torture_random_state *rrsp, 326 struct rt_read_seg *rtrsp); 327 void (*readunlock)(int idx); 328 unsigned long (*get_gp_seq)(void); 329 unsigned long (*gp_diff)(unsigned long new, unsigned long old); 330 void (*deferred_free)(struct rcu_torture *p); 331 void (*sync)(void); 332 void (*exp_sync)(void); 333 unsigned long (*get_gp_state)(void); 334 unsigned long (*start_gp_poll)(void); 335 bool (*poll_gp_state)(unsigned long oldstate); 336 void (*cond_sync)(unsigned long oldstate); 337 call_rcu_func_t call; 338 void (*cb_barrier)(void); 339 void (*fqs)(void); 340 void (*stats)(void); 341 void (*gp_kthread_dbg)(void); 342 int (*stall_dur)(void); 343 int irq_capable; 344 int can_boost; 345 int extendables; 346 int slow_gps; 347 const char *name; 348 }; 349 350 static struct rcu_torture_ops *cur_ops; 351 352 /* 353 * Definitions for rcu torture testing. 354 */ 355 356 static int rcu_torture_read_lock(void) __acquires(RCU) 357 { 358 rcu_read_lock(); 359 return 0; 360 } 361 362 static void 363 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 364 { 365 unsigned long started; 366 unsigned long completed; 367 const unsigned long shortdelay_us = 200; 368 unsigned long longdelay_ms = 300; 369 unsigned long long ts; 370 371 /* We want a short delay sometimes to make a reader delay the grace 372 * period, and we want a long delay occasionally to trigger 373 * force_quiescent_state. */ 374 375 if (!READ_ONCE(rcu_fwd_cb_nodelay) && 376 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) { 377 started = cur_ops->get_gp_seq(); 378 ts = rcu_trace_clock_local(); 379 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK)) 380 longdelay_ms = 5; /* Avoid triggering BH limits. */ 381 mdelay(longdelay_ms); 382 rtrsp->rt_delay_ms = longdelay_ms; 383 completed = cur_ops->get_gp_seq(); 384 do_trace_rcu_torture_read(cur_ops->name, NULL, ts, 385 started, completed); 386 } 387 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) { 388 udelay(shortdelay_us); 389 rtrsp->rt_delay_us = shortdelay_us; 390 } 391 if (!preempt_count() && 392 !(torture_random(rrsp) % (nrealreaders * 500))) { 393 torture_preempt_schedule(); /* QS only if preemptible. */ 394 rtrsp->rt_preempted = true; 395 } 396 } 397 398 static void rcu_torture_read_unlock(int idx) __releases(RCU) 399 { 400 rcu_read_unlock(); 401 } 402 403 /* 404 * Update callback in the pipe. This should be invoked after a grace period. 405 */ 406 static bool 407 rcu_torture_pipe_update_one(struct rcu_torture *rp) 408 { 409 int i; 410 struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp); 411 412 if (rtrcp) { 413 WRITE_ONCE(rp->rtort_chkp, NULL); 414 smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire(). 415 } 416 i = READ_ONCE(rp->rtort_pipe_count); 417 if (i > RCU_TORTURE_PIPE_LEN) 418 i = RCU_TORTURE_PIPE_LEN; 419 atomic_inc(&rcu_torture_wcount[i]); 420 WRITE_ONCE(rp->rtort_pipe_count, i + 1); 421 if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) { 422 rp->rtort_mbtest = 0; 423 return true; 424 } 425 return false; 426 } 427 428 /* 429 * Update all callbacks in the pipe. Suitable for synchronous grace-period 430 * primitives. 431 */ 432 static void 433 rcu_torture_pipe_update(struct rcu_torture *old_rp) 434 { 435 struct rcu_torture *rp; 436 struct rcu_torture *rp1; 437 438 if (old_rp) 439 list_add(&old_rp->rtort_free, &rcu_torture_removed); 440 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) { 441 if (rcu_torture_pipe_update_one(rp)) { 442 list_del(&rp->rtort_free); 443 rcu_torture_free(rp); 444 } 445 } 446 } 447 448 static void 449 rcu_torture_cb(struct rcu_head *p) 450 { 451 struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); 452 453 if (torture_must_stop_irq()) { 454 /* Test is ending, just drop callbacks on the floor. */ 455 /* The next initialization will pick up the pieces. */ 456 return; 457 } 458 if (rcu_torture_pipe_update_one(rp)) 459 rcu_torture_free(rp); 460 else 461 cur_ops->deferred_free(rp); 462 } 463 464 static unsigned long rcu_no_completed(void) 465 { 466 return 0; 467 } 468 469 static void rcu_torture_deferred_free(struct rcu_torture *p) 470 { 471 call_rcu(&p->rtort_rcu, rcu_torture_cb); 472 } 473 474 static void rcu_sync_torture_init(void) 475 { 476 INIT_LIST_HEAD(&rcu_torture_removed); 477 } 478 479 static struct rcu_torture_ops rcu_ops = { 480 .ttype = RCU_FLAVOR, 481 .init = rcu_sync_torture_init, 482 .readlock = rcu_torture_read_lock, 483 .read_delay = rcu_read_delay, 484 .readunlock = rcu_torture_read_unlock, 485 .get_gp_seq = rcu_get_gp_seq, 486 .gp_diff = rcu_seq_diff, 487 .deferred_free = rcu_torture_deferred_free, 488 .sync = synchronize_rcu, 489 .exp_sync = synchronize_rcu_expedited, 490 .get_gp_state = get_state_synchronize_rcu, 491 .cond_sync = cond_synchronize_rcu, 492 .call = call_rcu, 493 .cb_barrier = rcu_barrier, 494 .fqs = rcu_force_quiescent_state, 495 .stats = NULL, 496 .gp_kthread_dbg = show_rcu_gp_kthreads, 497 .stall_dur = rcu_jiffies_till_stall_check, 498 .irq_capable = 1, 499 .can_boost = rcu_can_boost(), 500 .extendables = RCUTORTURE_MAX_EXTEND, 501 .name = "rcu" 502 }; 503 504 /* 505 * Don't even think about trying any of these in real life!!! 506 * The names includes "busted", and they really means it! 507 * The only purpose of these functions is to provide a buggy RCU 508 * implementation to make sure that rcutorture correctly emits 509 * buggy-RCU error messages. 510 */ 511 static void rcu_busted_torture_deferred_free(struct rcu_torture *p) 512 { 513 /* This is a deliberate bug for testing purposes only! */ 514 rcu_torture_cb(&p->rtort_rcu); 515 } 516 517 static void synchronize_rcu_busted(void) 518 { 519 /* This is a deliberate bug for testing purposes only! */ 520 } 521 522 static void 523 call_rcu_busted(struct rcu_head *head, rcu_callback_t func) 524 { 525 /* This is a deliberate bug for testing purposes only! */ 526 func(head); 527 } 528 529 static struct rcu_torture_ops rcu_busted_ops = { 530 .ttype = INVALID_RCU_FLAVOR, 531 .init = rcu_sync_torture_init, 532 .readlock = rcu_torture_read_lock, 533 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 534 .readunlock = rcu_torture_read_unlock, 535 .get_gp_seq = rcu_no_completed, 536 .deferred_free = rcu_busted_torture_deferred_free, 537 .sync = synchronize_rcu_busted, 538 .exp_sync = synchronize_rcu_busted, 539 .call = call_rcu_busted, 540 .cb_barrier = NULL, 541 .fqs = NULL, 542 .stats = NULL, 543 .irq_capable = 1, 544 .name = "busted" 545 }; 546 547 /* 548 * Definitions for srcu torture testing. 549 */ 550 551 DEFINE_STATIC_SRCU(srcu_ctl); 552 static struct srcu_struct srcu_ctld; 553 static struct srcu_struct *srcu_ctlp = &srcu_ctl; 554 555 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp) 556 { 557 return srcu_read_lock(srcu_ctlp); 558 } 559 560 static void 561 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp) 562 { 563 long delay; 564 const long uspertick = 1000000 / HZ; 565 const long longdelay = 10; 566 567 /* We want there to be long-running readers, but not all the time. */ 568 569 delay = torture_random(rrsp) % 570 (nrealreaders * 2 * longdelay * uspertick); 571 if (!delay && in_task()) { 572 schedule_timeout_interruptible(longdelay); 573 rtrsp->rt_delay_jiffies = longdelay; 574 } else { 575 rcu_read_delay(rrsp, rtrsp); 576 } 577 } 578 579 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp) 580 { 581 srcu_read_unlock(srcu_ctlp, idx); 582 } 583 584 static unsigned long srcu_torture_completed(void) 585 { 586 return srcu_batches_completed(srcu_ctlp); 587 } 588 589 static void srcu_torture_deferred_free(struct rcu_torture *rp) 590 { 591 call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb); 592 } 593 594 static void srcu_torture_synchronize(void) 595 { 596 synchronize_srcu(srcu_ctlp); 597 } 598 599 static unsigned long srcu_torture_get_gp_state(void) 600 { 601 return get_state_synchronize_srcu(srcu_ctlp); 602 } 603 604 static unsigned long srcu_torture_start_gp_poll(void) 605 { 606 return start_poll_synchronize_srcu(srcu_ctlp); 607 } 608 609 static bool srcu_torture_poll_gp_state(unsigned long oldstate) 610 { 611 return poll_state_synchronize_srcu(srcu_ctlp, oldstate); 612 } 613 614 static void srcu_torture_call(struct rcu_head *head, 615 rcu_callback_t func) 616 { 617 call_srcu(srcu_ctlp, head, func); 618 } 619 620 static void srcu_torture_barrier(void) 621 { 622 srcu_barrier(srcu_ctlp); 623 } 624 625 static void srcu_torture_stats(void) 626 { 627 srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG); 628 } 629 630 static void srcu_torture_synchronize_expedited(void) 631 { 632 synchronize_srcu_expedited(srcu_ctlp); 633 } 634 635 static struct rcu_torture_ops srcu_ops = { 636 .ttype = SRCU_FLAVOR, 637 .init = rcu_sync_torture_init, 638 .readlock = srcu_torture_read_lock, 639 .read_delay = srcu_read_delay, 640 .readunlock = srcu_torture_read_unlock, 641 .get_gp_seq = srcu_torture_completed, 642 .deferred_free = srcu_torture_deferred_free, 643 .sync = srcu_torture_synchronize, 644 .exp_sync = srcu_torture_synchronize_expedited, 645 .get_gp_state = srcu_torture_get_gp_state, 646 .start_gp_poll = srcu_torture_start_gp_poll, 647 .poll_gp_state = srcu_torture_poll_gp_state, 648 .call = srcu_torture_call, 649 .cb_barrier = srcu_torture_barrier, 650 .stats = srcu_torture_stats, 651 .irq_capable = 1, 652 .name = "srcu" 653 }; 654 655 static void srcu_torture_init(void) 656 { 657 rcu_sync_torture_init(); 658 WARN_ON(init_srcu_struct(&srcu_ctld)); 659 srcu_ctlp = &srcu_ctld; 660 } 661 662 static void srcu_torture_cleanup(void) 663 { 664 cleanup_srcu_struct(&srcu_ctld); 665 srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */ 666 } 667 668 /* As above, but dynamically allocated. */ 669 static struct rcu_torture_ops srcud_ops = { 670 .ttype = SRCU_FLAVOR, 671 .init = srcu_torture_init, 672 .cleanup = srcu_torture_cleanup, 673 .readlock = srcu_torture_read_lock, 674 .read_delay = srcu_read_delay, 675 .readunlock = srcu_torture_read_unlock, 676 .get_gp_seq = srcu_torture_completed, 677 .deferred_free = srcu_torture_deferred_free, 678 .sync = srcu_torture_synchronize, 679 .exp_sync = srcu_torture_synchronize_expedited, 680 .call = srcu_torture_call, 681 .cb_barrier = srcu_torture_barrier, 682 .stats = srcu_torture_stats, 683 .irq_capable = 1, 684 .name = "srcud" 685 }; 686 687 /* As above, but broken due to inappropriate reader extension. */ 688 static struct rcu_torture_ops busted_srcud_ops = { 689 .ttype = SRCU_FLAVOR, 690 .init = srcu_torture_init, 691 .cleanup = srcu_torture_cleanup, 692 .readlock = srcu_torture_read_lock, 693 .read_delay = rcu_read_delay, 694 .readunlock = srcu_torture_read_unlock, 695 .get_gp_seq = srcu_torture_completed, 696 .deferred_free = srcu_torture_deferred_free, 697 .sync = srcu_torture_synchronize, 698 .exp_sync = srcu_torture_synchronize_expedited, 699 .call = srcu_torture_call, 700 .cb_barrier = srcu_torture_barrier, 701 .stats = srcu_torture_stats, 702 .irq_capable = 1, 703 .extendables = RCUTORTURE_MAX_EXTEND, 704 .name = "busted_srcud" 705 }; 706 707 /* 708 * Definitions for RCU-tasks torture testing. 709 */ 710 711 static int tasks_torture_read_lock(void) 712 { 713 return 0; 714 } 715 716 static void tasks_torture_read_unlock(int idx) 717 { 718 } 719 720 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p) 721 { 722 call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb); 723 } 724 725 static void synchronize_rcu_mult_test(void) 726 { 727 synchronize_rcu_mult(call_rcu_tasks, call_rcu); 728 } 729 730 static struct rcu_torture_ops tasks_ops = { 731 .ttype = RCU_TASKS_FLAVOR, 732 .init = rcu_sync_torture_init, 733 .readlock = tasks_torture_read_lock, 734 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 735 .readunlock = tasks_torture_read_unlock, 736 .get_gp_seq = rcu_no_completed, 737 .deferred_free = rcu_tasks_torture_deferred_free, 738 .sync = synchronize_rcu_tasks, 739 .exp_sync = synchronize_rcu_mult_test, 740 .call = call_rcu_tasks, 741 .cb_barrier = rcu_barrier_tasks, 742 .gp_kthread_dbg = show_rcu_tasks_classic_gp_kthread, 743 .fqs = NULL, 744 .stats = NULL, 745 .irq_capable = 1, 746 .slow_gps = 1, 747 .name = "tasks" 748 }; 749 750 /* 751 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing. 752 * This implementation does not necessarily work well with CPU hotplug. 753 */ 754 755 static void synchronize_rcu_trivial(void) 756 { 757 int cpu; 758 759 for_each_online_cpu(cpu) { 760 rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); 761 WARN_ON_ONCE(raw_smp_processor_id() != cpu); 762 } 763 } 764 765 static int rcu_torture_read_lock_trivial(void) __acquires(RCU) 766 { 767 preempt_disable(); 768 return 0; 769 } 770 771 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU) 772 { 773 preempt_enable(); 774 } 775 776 static struct rcu_torture_ops trivial_ops = { 777 .ttype = RCU_TRIVIAL_FLAVOR, 778 .init = rcu_sync_torture_init, 779 .readlock = rcu_torture_read_lock_trivial, 780 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 781 .readunlock = rcu_torture_read_unlock_trivial, 782 .get_gp_seq = rcu_no_completed, 783 .sync = synchronize_rcu_trivial, 784 .exp_sync = synchronize_rcu_trivial, 785 .fqs = NULL, 786 .stats = NULL, 787 .irq_capable = 1, 788 .name = "trivial" 789 }; 790 791 /* 792 * Definitions for rude RCU-tasks torture testing. 793 */ 794 795 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p) 796 { 797 call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb); 798 } 799 800 static struct rcu_torture_ops tasks_rude_ops = { 801 .ttype = RCU_TASKS_RUDE_FLAVOR, 802 .init = rcu_sync_torture_init, 803 .readlock = rcu_torture_read_lock_trivial, 804 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 805 .readunlock = rcu_torture_read_unlock_trivial, 806 .get_gp_seq = rcu_no_completed, 807 .deferred_free = rcu_tasks_rude_torture_deferred_free, 808 .sync = synchronize_rcu_tasks_rude, 809 .exp_sync = synchronize_rcu_tasks_rude, 810 .call = call_rcu_tasks_rude, 811 .cb_barrier = rcu_barrier_tasks_rude, 812 .gp_kthread_dbg = show_rcu_tasks_rude_gp_kthread, 813 .fqs = NULL, 814 .stats = NULL, 815 .irq_capable = 1, 816 .name = "tasks-rude" 817 }; 818 819 /* 820 * Definitions for tracing RCU-tasks torture testing. 821 */ 822 823 static int tasks_tracing_torture_read_lock(void) 824 { 825 rcu_read_lock_trace(); 826 return 0; 827 } 828 829 static void tasks_tracing_torture_read_unlock(int idx) 830 { 831 rcu_read_unlock_trace(); 832 } 833 834 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p) 835 { 836 call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb); 837 } 838 839 static struct rcu_torture_ops tasks_tracing_ops = { 840 .ttype = RCU_TASKS_TRACING_FLAVOR, 841 .init = rcu_sync_torture_init, 842 .readlock = tasks_tracing_torture_read_lock, 843 .read_delay = srcu_read_delay, /* just reuse srcu's version. */ 844 .readunlock = tasks_tracing_torture_read_unlock, 845 .get_gp_seq = rcu_no_completed, 846 .deferred_free = rcu_tasks_tracing_torture_deferred_free, 847 .sync = synchronize_rcu_tasks_trace, 848 .exp_sync = synchronize_rcu_tasks_trace, 849 .call = call_rcu_tasks_trace, 850 .cb_barrier = rcu_barrier_tasks_trace, 851 .gp_kthread_dbg = show_rcu_tasks_trace_gp_kthread, 852 .fqs = NULL, 853 .stats = NULL, 854 .irq_capable = 1, 855 .slow_gps = 1, 856 .name = "tasks-tracing" 857 }; 858 859 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old) 860 { 861 if (!cur_ops->gp_diff) 862 return new - old; 863 return cur_ops->gp_diff(new, old); 864 } 865 866 static bool __maybe_unused torturing_tasks(void) 867 { 868 return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops; 869 } 870 871 /* 872 * RCU torture priority-boost testing. Runs one real-time thread per 873 * CPU for moderate bursts, repeatedly registering RCU callbacks and 874 * spinning waiting for them to be invoked. If a given callback takes 875 * too long to be invoked, we assume that priority inversion has occurred. 876 */ 877 878 struct rcu_boost_inflight { 879 struct rcu_head rcu; 880 int inflight; 881 }; 882 883 static void rcu_torture_boost_cb(struct rcu_head *head) 884 { 885 struct rcu_boost_inflight *rbip = 886 container_of(head, struct rcu_boost_inflight, rcu); 887 888 /* Ensure RCU-core accesses precede clearing ->inflight */ 889 smp_store_release(&rbip->inflight, 0); 890 } 891 892 static int old_rt_runtime = -1; 893 894 static void rcu_torture_disable_rt_throttle(void) 895 { 896 /* 897 * Disable RT throttling so that rcutorture's boost threads don't get 898 * throttled. Only possible if rcutorture is built-in otherwise the 899 * user should manually do this by setting the sched_rt_period_us and 900 * sched_rt_runtime sysctls. 901 */ 902 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1) 903 return; 904 905 old_rt_runtime = sysctl_sched_rt_runtime; 906 sysctl_sched_rt_runtime = -1; 907 } 908 909 static void rcu_torture_enable_rt_throttle(void) 910 { 911 if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1) 912 return; 913 914 sysctl_sched_rt_runtime = old_rt_runtime; 915 old_rt_runtime = -1; 916 } 917 918 static bool rcu_torture_boost_failed(unsigned long start, unsigned long end) 919 { 920 if (end - start > test_boost_duration * HZ - HZ / 2) { 921 VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); 922 n_rcu_torture_boost_failure++; 923 924 return true; /* failed */ 925 } 926 927 return false; /* passed */ 928 } 929 930 static int rcu_torture_boost(void *arg) 931 { 932 unsigned long call_rcu_time; 933 unsigned long endtime; 934 unsigned long oldstarttime; 935 struct rcu_boost_inflight rbi = { .inflight = 0 }; 936 937 VERBOSE_TOROUT_STRING("rcu_torture_boost started"); 938 939 /* Set real-time priority. */ 940 sched_set_fifo_low(current); 941 942 init_rcu_head_on_stack(&rbi.rcu); 943 /* Each pass through the following loop does one boost-test cycle. */ 944 do { 945 /* Track if the test failed already in this test interval? */ 946 bool failed = false; 947 948 /* Increment n_rcu_torture_boosts once per boost-test */ 949 while (!kthread_should_stop()) { 950 if (mutex_trylock(&boost_mutex)) { 951 n_rcu_torture_boosts++; 952 mutex_unlock(&boost_mutex); 953 break; 954 } 955 schedule_timeout_uninterruptible(1); 956 } 957 if (kthread_should_stop()) 958 goto checkwait; 959 960 /* Wait for the next test interval. */ 961 oldstarttime = boost_starttime; 962 while (time_before(jiffies, oldstarttime)) { 963 schedule_timeout_interruptible(oldstarttime - jiffies); 964 if (stutter_wait("rcu_torture_boost")) 965 sched_set_fifo_low(current); 966 if (torture_must_stop()) 967 goto checkwait; 968 } 969 970 /* Do one boost-test interval. */ 971 endtime = oldstarttime + test_boost_duration * HZ; 972 call_rcu_time = jiffies; 973 while (time_before(jiffies, endtime)) { 974 /* If we don't have a callback in flight, post one. */ 975 if (!smp_load_acquire(&rbi.inflight)) { 976 /* RCU core before ->inflight = 1. */ 977 smp_store_release(&rbi.inflight, 1); 978 call_rcu(&rbi.rcu, rcu_torture_boost_cb); 979 /* Check if the boost test failed */ 980 failed = failed || 981 rcu_torture_boost_failed(call_rcu_time, 982 jiffies); 983 call_rcu_time = jiffies; 984 } 985 if (stutter_wait("rcu_torture_boost")) 986 sched_set_fifo_low(current); 987 if (torture_must_stop()) 988 goto checkwait; 989 } 990 991 /* 992 * If boost never happened, then inflight will always be 1, in 993 * this case the boost check would never happen in the above 994 * loop so do another one here. 995 */ 996 if (!failed && smp_load_acquire(&rbi.inflight)) 997 rcu_torture_boost_failed(call_rcu_time, jiffies); 998 999 /* 1000 * Set the start time of the next test interval. 1001 * Yes, this is vulnerable to long delays, but such 1002 * delays simply cause a false negative for the next 1003 * interval. Besides, we are running at RT priority, 1004 * so delays should be relatively rare. 1005 */ 1006 while (oldstarttime == boost_starttime && 1007 !kthread_should_stop()) { 1008 if (mutex_trylock(&boost_mutex)) { 1009 boost_starttime = jiffies + 1010 test_boost_interval * HZ; 1011 mutex_unlock(&boost_mutex); 1012 break; 1013 } 1014 schedule_timeout_uninterruptible(1); 1015 } 1016 1017 /* Go do the stutter. */ 1018 checkwait: if (stutter_wait("rcu_torture_boost")) 1019 sched_set_fifo_low(current); 1020 } while (!torture_must_stop()); 1021 1022 /* Clean up and exit. */ 1023 while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) { 1024 torture_shutdown_absorb("rcu_torture_boost"); 1025 schedule_timeout_uninterruptible(1); 1026 } 1027 destroy_rcu_head_on_stack(&rbi.rcu); 1028 torture_kthread_stopping("rcu_torture_boost"); 1029 return 0; 1030 } 1031 1032 /* 1033 * RCU torture force-quiescent-state kthread. Repeatedly induces 1034 * bursts of calls to force_quiescent_state(), increasing the probability 1035 * of occurrence of some important types of race conditions. 1036 */ 1037 static int 1038 rcu_torture_fqs(void *arg) 1039 { 1040 unsigned long fqs_resume_time; 1041 int fqs_burst_remaining; 1042 int oldnice = task_nice(current); 1043 1044 VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); 1045 do { 1046 fqs_resume_time = jiffies + fqs_stutter * HZ; 1047 while (time_before(jiffies, fqs_resume_time) && 1048 !kthread_should_stop()) { 1049 schedule_timeout_interruptible(1); 1050 } 1051 fqs_burst_remaining = fqs_duration; 1052 while (fqs_burst_remaining > 0 && 1053 !kthread_should_stop()) { 1054 cur_ops->fqs(); 1055 udelay(fqs_holdoff); 1056 fqs_burst_remaining -= fqs_holdoff; 1057 } 1058 if (stutter_wait("rcu_torture_fqs")) 1059 sched_set_normal(current, oldnice); 1060 } while (!torture_must_stop()); 1061 torture_kthread_stopping("rcu_torture_fqs"); 1062 return 0; 1063 } 1064 1065 // Used by writers to randomly choose from the available grace-period 1066 // primitives. The only purpose of the initialization is to size the array. 1067 static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC }; 1068 static int nsynctypes; 1069 1070 /* 1071 * Determine which grace-period primitives are available. 1072 */ 1073 static void rcu_torture_write_types(void) 1074 { 1075 bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal; 1076 bool gp_poll1 = gp_poll, gp_sync1 = gp_sync; 1077 1078 /* Initialize synctype[] array. If none set, take default. */ 1079 if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1) 1080 gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true; 1081 if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) { 1082 synctype[nsynctypes++] = RTWS_COND_GET; 1083 pr_info("%s: Testing conditional GPs.\n", __func__); 1084 } else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) { 1085 pr_alert("%s: gp_cond without primitives.\n", __func__); 1086 } 1087 if (gp_exp1 && cur_ops->exp_sync) { 1088 synctype[nsynctypes++] = RTWS_EXP_SYNC; 1089 pr_info("%s: Testing expedited GPs.\n", __func__); 1090 } else if (gp_exp && !cur_ops->exp_sync) { 1091 pr_alert("%s: gp_exp without primitives.\n", __func__); 1092 } 1093 if (gp_normal1 && cur_ops->deferred_free) { 1094 synctype[nsynctypes++] = RTWS_DEF_FREE; 1095 pr_info("%s: Testing asynchronous GPs.\n", __func__); 1096 } else if (gp_normal && !cur_ops->deferred_free) { 1097 pr_alert("%s: gp_normal without primitives.\n", __func__); 1098 } 1099 if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) { 1100 synctype[nsynctypes++] = RTWS_POLL_GET; 1101 pr_info("%s: Testing polling GPs.\n", __func__); 1102 } else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) { 1103 pr_alert("%s: gp_poll without primitives.\n", __func__); 1104 } 1105 if (gp_sync1 && cur_ops->sync) { 1106 synctype[nsynctypes++] = RTWS_SYNC; 1107 pr_info("%s: Testing normal GPs.\n", __func__); 1108 } else if (gp_sync && !cur_ops->sync) { 1109 pr_alert("%s: gp_sync without primitives.\n", __func__); 1110 } 1111 } 1112 1113 /* 1114 * RCU torture writer kthread. Repeatedly substitutes a new structure 1115 * for that pointed to by rcu_torture_current, freeing the old structure 1116 * after a series of grace periods (the "pipeline"). 1117 */ 1118 static int 1119 rcu_torture_writer(void *arg) 1120 { 1121 bool boot_ended; 1122 bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal(); 1123 unsigned long cookie; 1124 int expediting = 0; 1125 unsigned long gp_snap; 1126 int i; 1127 int idx; 1128 int oldnice = task_nice(current); 1129 struct rcu_torture *rp; 1130 struct rcu_torture *old_rp; 1131 static DEFINE_TORTURE_RANDOM(rand); 1132 bool stutter_waited; 1133 1134 VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); 1135 if (!can_expedite) 1136 pr_alert("%s" TORTURE_FLAG 1137 " GP expediting controlled from boot/sysfs for %s.\n", 1138 torture_type, cur_ops->name); 1139 if (WARN_ONCE(nsynctypes == 0, 1140 "rcu_torture_writer: No update-side primitives.\n")) { 1141 /* 1142 * No updates primitives, so don't try updating. 1143 * The resulting test won't be testing much, hence the 1144 * above WARN_ONCE(). 1145 */ 1146 rcu_torture_writer_state = RTWS_STOPPING; 1147 torture_kthread_stopping("rcu_torture_writer"); 1148 } 1149 1150 do { 1151 rcu_torture_writer_state = RTWS_FIXED_DELAY; 1152 torture_hrtimeout_us(500, 1000, &rand); 1153 rp = rcu_torture_alloc(); 1154 if (rp == NULL) 1155 continue; 1156 rp->rtort_pipe_count = 0; 1157 rcu_torture_writer_state = RTWS_DELAY; 1158 udelay(torture_random(&rand) & 0x3ff); 1159 rcu_torture_writer_state = RTWS_REPLACE; 1160 old_rp = rcu_dereference_check(rcu_torture_current, 1161 current == writer_task); 1162 rp->rtort_mbtest = 1; 1163 rcu_assign_pointer(rcu_torture_current, rp); 1164 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */ 1165 if (old_rp) { 1166 i = old_rp->rtort_pipe_count; 1167 if (i > RCU_TORTURE_PIPE_LEN) 1168 i = RCU_TORTURE_PIPE_LEN; 1169 atomic_inc(&rcu_torture_wcount[i]); 1170 WRITE_ONCE(old_rp->rtort_pipe_count, 1171 old_rp->rtort_pipe_count + 1); 1172 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) { 1173 idx = cur_ops->readlock(); 1174 cookie = cur_ops->get_gp_state(); 1175 WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && 1176 cur_ops->poll_gp_state(cookie), 1177 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n", 1178 __func__, 1179 rcu_torture_writer_state_getname(), 1180 rcu_torture_writer_state, 1181 cookie, cur_ops->get_gp_state()); 1182 cur_ops->readunlock(idx); 1183 } 1184 switch (synctype[torture_random(&rand) % nsynctypes]) { 1185 case RTWS_DEF_FREE: 1186 rcu_torture_writer_state = RTWS_DEF_FREE; 1187 cur_ops->deferred_free(old_rp); 1188 break; 1189 case RTWS_EXP_SYNC: 1190 rcu_torture_writer_state = RTWS_EXP_SYNC; 1191 cur_ops->exp_sync(); 1192 rcu_torture_pipe_update(old_rp); 1193 break; 1194 case RTWS_COND_GET: 1195 rcu_torture_writer_state = RTWS_COND_GET; 1196 gp_snap = cur_ops->get_gp_state(); 1197 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1198 rcu_torture_writer_state = RTWS_COND_SYNC; 1199 cur_ops->cond_sync(gp_snap); 1200 rcu_torture_pipe_update(old_rp); 1201 break; 1202 case RTWS_POLL_GET: 1203 rcu_torture_writer_state = RTWS_POLL_GET; 1204 gp_snap = cur_ops->start_gp_poll(); 1205 rcu_torture_writer_state = RTWS_POLL_WAIT; 1206 while (!cur_ops->poll_gp_state(gp_snap)) 1207 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1208 &rand); 1209 rcu_torture_pipe_update(old_rp); 1210 break; 1211 case RTWS_SYNC: 1212 rcu_torture_writer_state = RTWS_SYNC; 1213 cur_ops->sync(); 1214 rcu_torture_pipe_update(old_rp); 1215 break; 1216 default: 1217 WARN_ON_ONCE(1); 1218 break; 1219 } 1220 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1221 WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE && 1222 !cur_ops->poll_gp_state(cookie), 1223 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n", 1224 __func__, 1225 rcu_torture_writer_state_getname(), 1226 rcu_torture_writer_state, 1227 cookie, cur_ops->get_gp_state()); 1228 } 1229 WRITE_ONCE(rcu_torture_current_version, 1230 rcu_torture_current_version + 1); 1231 /* Cycle through nesting levels of rcu_expedite_gp() calls. */ 1232 if (can_expedite && 1233 !(torture_random(&rand) & 0xff & (!!expediting - 1))) { 1234 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited()); 1235 if (expediting >= 0) 1236 rcu_expedite_gp(); 1237 else 1238 rcu_unexpedite_gp(); 1239 if (++expediting > 3) 1240 expediting = -expediting; 1241 } else if (!can_expedite) { /* Disabled during boot, recheck. */ 1242 can_expedite = !rcu_gp_is_expedited() && 1243 !rcu_gp_is_normal(); 1244 } 1245 rcu_torture_writer_state = RTWS_STUTTER; 1246 boot_ended = rcu_inkernel_boot_has_ended(); 1247 stutter_waited = stutter_wait("rcu_torture_writer"); 1248 if (stutter_waited && 1249 !READ_ONCE(rcu_fwd_cb_nodelay) && 1250 !cur_ops->slow_gps && 1251 !torture_must_stop() && 1252 boot_ended) 1253 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) 1254 if (list_empty(&rcu_tortures[i].rtort_free) && 1255 rcu_access_pointer(rcu_torture_current) != 1256 &rcu_tortures[i]) { 1257 rcu_ftrace_dump(DUMP_ALL); 1258 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count); 1259 } 1260 if (stutter_waited) 1261 sched_set_normal(current, oldnice); 1262 } while (!torture_must_stop()); 1263 rcu_torture_current = NULL; // Let stats task know that we are done. 1264 /* Reset expediting back to unexpedited. */ 1265 if (expediting > 0) 1266 expediting = -expediting; 1267 while (can_expedite && expediting++ < 0) 1268 rcu_unexpedite_gp(); 1269 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited()); 1270 if (!can_expedite) 1271 pr_alert("%s" TORTURE_FLAG 1272 " Dynamic grace-period expediting was disabled.\n", 1273 torture_type); 1274 rcu_torture_writer_state = RTWS_STOPPING; 1275 torture_kthread_stopping("rcu_torture_writer"); 1276 return 0; 1277 } 1278 1279 /* 1280 * RCU torture fake writer kthread. Repeatedly calls sync, with a random 1281 * delay between calls. 1282 */ 1283 static int 1284 rcu_torture_fakewriter(void *arg) 1285 { 1286 unsigned long gp_snap; 1287 DEFINE_TORTURE_RANDOM(rand); 1288 1289 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); 1290 set_user_nice(current, MAX_NICE); 1291 1292 do { 1293 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand); 1294 if (cur_ops->cb_barrier != NULL && 1295 torture_random(&rand) % (nfakewriters * 8) == 0) { 1296 cur_ops->cb_barrier(); 1297 } else { 1298 switch (synctype[torture_random(&rand) % nsynctypes]) { 1299 case RTWS_DEF_FREE: 1300 break; 1301 case RTWS_EXP_SYNC: 1302 cur_ops->exp_sync(); 1303 break; 1304 case RTWS_COND_GET: 1305 gp_snap = cur_ops->get_gp_state(); 1306 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand); 1307 cur_ops->cond_sync(gp_snap); 1308 break; 1309 case RTWS_POLL_GET: 1310 gp_snap = cur_ops->start_gp_poll(); 1311 while (!cur_ops->poll_gp_state(gp_snap)) { 1312 torture_hrtimeout_jiffies(torture_random(&rand) % 16, 1313 &rand); 1314 } 1315 break; 1316 case RTWS_SYNC: 1317 cur_ops->sync(); 1318 break; 1319 default: 1320 WARN_ON_ONCE(1); 1321 break; 1322 } 1323 } 1324 stutter_wait("rcu_torture_fakewriter"); 1325 } while (!torture_must_stop()); 1326 1327 torture_kthread_stopping("rcu_torture_fakewriter"); 1328 return 0; 1329 } 1330 1331 static void rcu_torture_timer_cb(struct rcu_head *rhp) 1332 { 1333 kfree(rhp); 1334 } 1335 1336 // Set up and carry out testing of RCU's global memory ordering 1337 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp, 1338 struct torture_random_state *trsp) 1339 { 1340 unsigned long loops; 1341 int noc = torture_num_online_cpus(); 1342 int rdrchked; 1343 int rdrchker; 1344 struct rcu_torture_reader_check *rtrcp; // Me. 1345 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking. 1346 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked. 1347 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me. 1348 1349 if (myid < 0) 1350 return; // Don't try this from timer handlers. 1351 1352 // Increment my counter. 1353 rtrcp = &rcu_torture_reader_mbchk[myid]; 1354 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1); 1355 1356 // Attempt to assign someone else some checking work. 1357 rdrchked = torture_random(trsp) % nrealreaders; 1358 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1359 rdrchker = torture_random(trsp) % nrealreaders; 1360 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker]; 1361 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker && 1362 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below. 1363 !READ_ONCE(rtp->rtort_chkp) && 1364 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below. 1365 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops); 1366 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0); 1367 rtrcp->rtc_chkrdr = rdrchked; 1368 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends. 1369 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) || 1370 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp)) 1371 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out. 1372 } 1373 1374 // If assigned some completed work, do it! 1375 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner); 1376 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready)) 1377 return; // No work or work not yet ready. 1378 rdrchked = rtrcp_assigner->rtc_chkrdr; 1379 if (WARN_ON_ONCE(rdrchked < 0)) 1380 return; 1381 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked]; 1382 loops = READ_ONCE(rtrcp_chked->rtc_myloops); 1383 atomic_inc(&n_rcu_torture_mbchk_tries); 1384 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops)) 1385 atomic_inc(&n_rcu_torture_mbchk_fail); 1386 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2; 1387 rtrcp_assigner->rtc_ready = 0; 1388 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work. 1389 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign. 1390 } 1391 1392 /* 1393 * Do one extension of an RCU read-side critical section using the 1394 * current reader state in readstate (set to zero for initial entry 1395 * to extended critical section), set the new state as specified by 1396 * newstate (set to zero for final exit from extended critical section), 1397 * and random-number-generator state in trsp. If this is neither the 1398 * beginning or end of the critical section and if there was actually a 1399 * change, do a ->read_delay(). 1400 */ 1401 static void rcutorture_one_extend(int *readstate, int newstate, 1402 struct torture_random_state *trsp, 1403 struct rt_read_seg *rtrsp) 1404 { 1405 unsigned long flags; 1406 int idxnew = -1; 1407 int idxold = *readstate; 1408 int statesnew = ~*readstate & newstate; 1409 int statesold = *readstate & ~newstate; 1410 1411 WARN_ON_ONCE(idxold < 0); 1412 WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1); 1413 rtrsp->rt_readstate = newstate; 1414 1415 /* First, put new protection in place to avoid critical-section gap. */ 1416 if (statesnew & RCUTORTURE_RDR_BH) 1417 local_bh_disable(); 1418 if (statesnew & RCUTORTURE_RDR_IRQ) 1419 local_irq_disable(); 1420 if (statesnew & RCUTORTURE_RDR_PREEMPT) 1421 preempt_disable(); 1422 if (statesnew & RCUTORTURE_RDR_RBH) 1423 rcu_read_lock_bh(); 1424 if (statesnew & RCUTORTURE_RDR_SCHED) 1425 rcu_read_lock_sched(); 1426 if (statesnew & RCUTORTURE_RDR_RCU) 1427 idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT; 1428 1429 /* Next, remove old protection, irq first due to bh conflict. */ 1430 if (statesold & RCUTORTURE_RDR_IRQ) 1431 local_irq_enable(); 1432 if (statesold & RCUTORTURE_RDR_BH) 1433 local_bh_enable(); 1434 if (statesold & RCUTORTURE_RDR_PREEMPT) 1435 preempt_enable(); 1436 if (statesold & RCUTORTURE_RDR_RBH) 1437 rcu_read_unlock_bh(); 1438 if (statesold & RCUTORTURE_RDR_SCHED) 1439 rcu_read_unlock_sched(); 1440 if (statesold & RCUTORTURE_RDR_RCU) { 1441 bool lockit = !statesnew && !(torture_random(trsp) & 0xffff); 1442 1443 if (lockit) 1444 raw_spin_lock_irqsave(¤t->pi_lock, flags); 1445 cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT); 1446 if (lockit) 1447 raw_spin_unlock_irqrestore(¤t->pi_lock, flags); 1448 } 1449 1450 /* Delay if neither beginning nor end and there was a change. */ 1451 if ((statesnew || statesold) && *readstate && newstate) 1452 cur_ops->read_delay(trsp, rtrsp); 1453 1454 /* Update the reader state. */ 1455 if (idxnew == -1) 1456 idxnew = idxold & ~RCUTORTURE_RDR_MASK; 1457 WARN_ON_ONCE(idxnew < 0); 1458 WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1); 1459 *readstate = idxnew | newstate; 1460 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0); 1461 WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1); 1462 } 1463 1464 /* Return the biggest extendables mask given current RCU and boot parameters. */ 1465 static int rcutorture_extend_mask_max(void) 1466 { 1467 int mask; 1468 1469 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND); 1470 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables; 1471 mask = mask | RCUTORTURE_RDR_RCU; 1472 return mask; 1473 } 1474 1475 /* Return a random protection state mask, but with at least one bit set. */ 1476 static int 1477 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp) 1478 { 1479 int mask = rcutorture_extend_mask_max(); 1480 unsigned long randmask1 = torture_random(trsp) >> 8; 1481 unsigned long randmask2 = randmask1 >> 3; 1482 1483 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT); 1484 /* Mostly only one bit (need preemption!), sometimes lots of bits. */ 1485 if (!(randmask1 & 0x7)) 1486 mask = mask & randmask2; 1487 else 1488 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS)); 1489 /* Can't enable bh w/irq disabled. */ 1490 if ((mask & RCUTORTURE_RDR_IRQ) && 1491 ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) || 1492 (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH)))) 1493 mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH; 1494 return mask ?: RCUTORTURE_RDR_RCU; 1495 } 1496 1497 /* 1498 * Do a randomly selected number of extensions of an existing RCU read-side 1499 * critical section. 1500 */ 1501 static struct rt_read_seg * 1502 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp, 1503 struct rt_read_seg *rtrsp) 1504 { 1505 int i; 1506 int j; 1507 int mask = rcutorture_extend_mask_max(); 1508 1509 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */ 1510 if (!((mask - 1) & mask)) 1511 return rtrsp; /* Current RCU reader not extendable. */ 1512 /* Bias towards larger numbers of loops. */ 1513 i = (torture_random(trsp) >> 3); 1514 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1; 1515 for (j = 0; j < i; j++) { 1516 mask = rcutorture_extend_mask(*readstate, trsp); 1517 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]); 1518 } 1519 return &rtrsp[j]; 1520 } 1521 1522 /* 1523 * Do one read-side critical section, returning false if there was 1524 * no data to read. Can be invoked both from process context and 1525 * from a timer handler. 1526 */ 1527 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid) 1528 { 1529 unsigned long cookie; 1530 int i; 1531 unsigned long started; 1532 unsigned long completed; 1533 int newstate; 1534 struct rcu_torture *p; 1535 int pipe_count; 1536 int readstate = 0; 1537 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } }; 1538 struct rt_read_seg *rtrsp = &rtseg[0]; 1539 struct rt_read_seg *rtrsp1; 1540 unsigned long long ts; 1541 1542 WARN_ON_ONCE(!rcu_is_watching()); 1543 newstate = rcutorture_extend_mask(readstate, trsp); 1544 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++); 1545 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1546 cookie = cur_ops->get_gp_state(); 1547 started = cur_ops->get_gp_seq(); 1548 ts = rcu_trace_clock_local(); 1549 p = rcu_dereference_check(rcu_torture_current, 1550 rcu_read_lock_bh_held() || 1551 rcu_read_lock_sched_held() || 1552 srcu_read_lock_held(srcu_ctlp) || 1553 rcu_read_lock_trace_held() || 1554 torturing_tasks()); 1555 if (p == NULL) { 1556 /* Wait for rcu_torture_writer to get underway */ 1557 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1558 return false; 1559 } 1560 if (p->rtort_mbtest == 0) 1561 atomic_inc(&n_rcu_torture_mberror); 1562 rcu_torture_reader_do_mbchk(myid, p, trsp); 1563 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp); 1564 preempt_disable(); 1565 pipe_count = READ_ONCE(p->rtort_pipe_count); 1566 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 1567 /* Should not happen, but... */ 1568 pipe_count = RCU_TORTURE_PIPE_LEN; 1569 } 1570 completed = cur_ops->get_gp_seq(); 1571 if (pipe_count > 1) { 1572 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, 1573 ts, started, completed); 1574 rcu_ftrace_dump(DUMP_ALL); 1575 } 1576 __this_cpu_inc(rcu_torture_count[pipe_count]); 1577 completed = rcutorture_seq_diff(completed, started); 1578 if (completed > RCU_TORTURE_PIPE_LEN) { 1579 /* Should not happen, but... */ 1580 completed = RCU_TORTURE_PIPE_LEN; 1581 } 1582 __this_cpu_inc(rcu_torture_batch[completed]); 1583 preempt_enable(); 1584 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) 1585 WARN_ONCE(cur_ops->poll_gp_state(cookie), 1586 "%s: Cookie check 3 failed %s(%d) %lu->%lu\n", 1587 __func__, 1588 rcu_torture_writer_state_getname(), 1589 rcu_torture_writer_state, 1590 cookie, cur_ops->get_gp_state()); 1591 rcutorture_one_extend(&readstate, 0, trsp, rtrsp); 1592 WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK); 1593 // This next splat is expected behavior if leakpointer, especially 1594 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels. 1595 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1); 1596 1597 /* If error or close call, record the sequence of reader protections. */ 1598 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) { 1599 i = 0; 1600 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++) 1601 err_segs[i++] = *rtrsp1; 1602 rt_read_nsegs = i; 1603 } 1604 1605 return true; 1606 } 1607 1608 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand); 1609 1610 /* 1611 * RCU torture reader from timer handler. Dereferences rcu_torture_current, 1612 * incrementing the corresponding element of the pipeline array. The 1613 * counter in the element should never be greater than 1, otherwise, the 1614 * RCU implementation is broken. 1615 */ 1616 static void rcu_torture_timer(struct timer_list *unused) 1617 { 1618 atomic_long_inc(&n_rcu_torture_timers); 1619 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1); 1620 1621 /* Test call_rcu() invocation from interrupt handler. */ 1622 if (cur_ops->call) { 1623 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT); 1624 1625 if (rhp) 1626 cur_ops->call(rhp, rcu_torture_timer_cb); 1627 } 1628 } 1629 1630 /* 1631 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 1632 * incrementing the corresponding element of the pipeline array. The 1633 * counter in the element should never be greater than 1, otherwise, the 1634 * RCU implementation is broken. 1635 */ 1636 static int 1637 rcu_torture_reader(void *arg) 1638 { 1639 unsigned long lastsleep = jiffies; 1640 long myid = (long)arg; 1641 int mynumonline = myid; 1642 DEFINE_TORTURE_RANDOM(rand); 1643 struct timer_list t; 1644 1645 VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); 1646 set_user_nice(current, MAX_NICE); 1647 if (irqreader && cur_ops->irq_capable) 1648 timer_setup_on_stack(&t, rcu_torture_timer, 0); 1649 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 1650 do { 1651 if (irqreader && cur_ops->irq_capable) { 1652 if (!timer_pending(&t)) 1653 mod_timer(&t, jiffies + 1); 1654 } 1655 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop()) 1656 schedule_timeout_interruptible(HZ); 1657 if (time_after(jiffies, lastsleep) && !torture_must_stop()) { 1658 torture_hrtimeout_us(500, 1000, &rand); 1659 lastsleep = jiffies + 10; 1660 } 1661 while (torture_num_online_cpus() < mynumonline && !torture_must_stop()) 1662 schedule_timeout_interruptible(HZ / 5); 1663 stutter_wait("rcu_torture_reader"); 1664 } while (!torture_must_stop()); 1665 if (irqreader && cur_ops->irq_capable) { 1666 del_timer_sync(&t); 1667 destroy_timer_on_stack(&t); 1668 } 1669 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 1670 torture_kthread_stopping("rcu_torture_reader"); 1671 return 0; 1672 } 1673 1674 /* 1675 * Print torture statistics. Caller must ensure that there is only 1676 * one call to this function at a given time!!! This is normally 1677 * accomplished by relying on the module system to only have one copy 1678 * of the module loaded, and then by giving the rcu_torture_stats 1679 * kthread full control (or the init/cleanup functions when rcu_torture_stats 1680 * thread is not running). 1681 */ 1682 static void 1683 rcu_torture_stats_print(void) 1684 { 1685 int cpu; 1686 int i; 1687 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1688 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 }; 1689 struct rcu_torture *rtcp; 1690 static unsigned long rtcv_snap = ULONG_MAX; 1691 static bool splatted; 1692 struct task_struct *wtp; 1693 1694 for_each_possible_cpu(cpu) { 1695 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1696 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]); 1697 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]); 1698 } 1699 } 1700 for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) { 1701 if (pipesummary[i] != 0) 1702 break; 1703 } 1704 1705 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1706 rtcp = rcu_access_pointer(rcu_torture_current); 1707 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ", 1708 rtcp, 1709 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER", 1710 rcu_torture_current_version, 1711 list_empty(&rcu_torture_freelist), 1712 atomic_read(&n_rcu_torture_alloc), 1713 atomic_read(&n_rcu_torture_alloc_fail), 1714 atomic_read(&n_rcu_torture_free)); 1715 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ", 1716 atomic_read(&n_rcu_torture_mberror), 1717 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries), 1718 n_rcu_torture_barrier_error, 1719 n_rcu_torture_boost_ktrerror, 1720 n_rcu_torture_boost_rterror); 1721 pr_cont("rtbf: %ld rtb: %ld nt: %ld ", 1722 n_rcu_torture_boost_failure, 1723 n_rcu_torture_boosts, 1724 atomic_long_read(&n_rcu_torture_timers)); 1725 torture_onoff_stats(); 1726 pr_cont("barrier: %ld/%ld:%ld ", 1727 data_race(n_barrier_successes), 1728 data_race(n_barrier_attempts), 1729 data_race(n_rcu_torture_barrier_error)); 1730 pr_cont("read-exits: %ld\n", data_race(n_read_exits)); 1731 1732 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1733 if (atomic_read(&n_rcu_torture_mberror) || 1734 atomic_read(&n_rcu_torture_mbchk_fail) || 1735 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror || 1736 n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure || 1737 i > 1) { 1738 pr_cont("%s", "!!! "); 1739 atomic_inc(&n_rcu_torture_error); 1740 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror)); 1741 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail)); 1742 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier() 1743 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread 1744 WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio 1745 WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed 1746 WARN_ON_ONCE(i > 1); // Too-short grace period 1747 } 1748 pr_cont("Reader Pipe: "); 1749 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1750 pr_cont(" %ld", pipesummary[i]); 1751 pr_cont("\n"); 1752 1753 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1754 pr_cont("Reader Batch: "); 1755 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 1756 pr_cont(" %ld", batchsummary[i]); 1757 pr_cont("\n"); 1758 1759 pr_alert("%s%s ", torture_type, TORTURE_FLAG); 1760 pr_cont("Free-Block Circulation: "); 1761 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 1762 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i])); 1763 } 1764 pr_cont("\n"); 1765 1766 if (cur_ops->stats) 1767 cur_ops->stats(); 1768 if (rtcv_snap == rcu_torture_current_version && 1769 rcu_access_pointer(rcu_torture_current) && 1770 !rcu_stall_is_suppressed()) { 1771 int __maybe_unused flags = 0; 1772 unsigned long __maybe_unused gp_seq = 0; 1773 1774 rcutorture_get_gp_data(cur_ops->ttype, 1775 &flags, &gp_seq); 1776 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, 1777 &flags, &gp_seq); 1778 wtp = READ_ONCE(writer_task); 1779 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n", 1780 rcu_torture_writer_state_getname(), 1781 rcu_torture_writer_state, gp_seq, flags, 1782 wtp == NULL ? ~0UL : wtp->state, 1783 wtp == NULL ? -1 : (int)task_cpu(wtp)); 1784 if (!splatted && wtp) { 1785 sched_show_task(wtp); 1786 splatted = true; 1787 } 1788 if (cur_ops->gp_kthread_dbg) 1789 cur_ops->gp_kthread_dbg(); 1790 rcu_ftrace_dump(DUMP_ALL); 1791 } 1792 rtcv_snap = rcu_torture_current_version; 1793 } 1794 1795 /* 1796 * Periodically prints torture statistics, if periodic statistics printing 1797 * was specified via the stat_interval module parameter. 1798 */ 1799 static int 1800 rcu_torture_stats(void *arg) 1801 { 1802 VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); 1803 do { 1804 schedule_timeout_interruptible(stat_interval * HZ); 1805 rcu_torture_stats_print(); 1806 torture_shutdown_absorb("rcu_torture_stats"); 1807 } while (!torture_must_stop()); 1808 torture_kthread_stopping("rcu_torture_stats"); 1809 return 0; 1810 } 1811 1812 static void 1813 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) 1814 { 1815 pr_alert("%s" TORTURE_FLAG 1816 "--- %s: nreaders=%d nfakewriters=%d " 1817 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 1818 "shuffle_interval=%d stutter=%d irqreader=%d " 1819 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " 1820 "test_boost=%d/%d test_boost_interval=%d " 1821 "test_boost_duration=%d shutdown_secs=%d " 1822 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d " 1823 "stall_cpu_block=%d " 1824 "n_barrier_cbs=%d " 1825 "onoff_interval=%d onoff_holdoff=%d " 1826 "read_exit_delay=%d read_exit_burst=%d\n", 1827 torture_type, tag, nrealreaders, nfakewriters, 1828 stat_interval, verbose, test_no_idle_hz, shuffle_interval, 1829 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, 1830 test_boost, cur_ops->can_boost, 1831 test_boost_interval, test_boost_duration, shutdown_secs, 1832 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff, 1833 stall_cpu_block, 1834 n_barrier_cbs, 1835 onoff_interval, onoff_holdoff, 1836 read_exit_delay, read_exit_burst); 1837 } 1838 1839 static int rcutorture_booster_cleanup(unsigned int cpu) 1840 { 1841 struct task_struct *t; 1842 1843 if (boost_tasks[cpu] == NULL) 1844 return 0; 1845 mutex_lock(&boost_mutex); 1846 t = boost_tasks[cpu]; 1847 boost_tasks[cpu] = NULL; 1848 rcu_torture_enable_rt_throttle(); 1849 mutex_unlock(&boost_mutex); 1850 1851 /* This must be outside of the mutex, otherwise deadlock! */ 1852 torture_stop_kthread(rcu_torture_boost, t); 1853 return 0; 1854 } 1855 1856 static int rcutorture_booster_init(unsigned int cpu) 1857 { 1858 int retval; 1859 1860 if (boost_tasks[cpu] != NULL) 1861 return 0; /* Already created, nothing more to do. */ 1862 1863 /* Don't allow time recalculation while creating a new task. */ 1864 mutex_lock(&boost_mutex); 1865 rcu_torture_disable_rt_throttle(); 1866 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); 1867 boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, 1868 cpu_to_node(cpu), 1869 "rcu_torture_boost"); 1870 if (IS_ERR(boost_tasks[cpu])) { 1871 retval = PTR_ERR(boost_tasks[cpu]); 1872 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); 1873 n_rcu_torture_boost_ktrerror++; 1874 boost_tasks[cpu] = NULL; 1875 mutex_unlock(&boost_mutex); 1876 return retval; 1877 } 1878 kthread_bind(boost_tasks[cpu], cpu); 1879 wake_up_process(boost_tasks[cpu]); 1880 mutex_unlock(&boost_mutex); 1881 return 0; 1882 } 1883 1884 /* 1885 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then 1886 * induces a CPU stall for the time specified by stall_cpu. 1887 */ 1888 static int rcu_torture_stall(void *args) 1889 { 1890 int idx; 1891 unsigned long stop_at; 1892 1893 VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); 1894 if (stall_cpu_holdoff > 0) { 1895 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); 1896 schedule_timeout_interruptible(stall_cpu_holdoff * HZ); 1897 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); 1898 } 1899 if (!kthread_should_stop() && stall_gp_kthread > 0) { 1900 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall"); 1901 rcu_gp_set_torture_wait(stall_gp_kthread * HZ); 1902 for (idx = 0; idx < stall_gp_kthread + 2; idx++) { 1903 if (kthread_should_stop()) 1904 break; 1905 schedule_timeout_uninterruptible(HZ); 1906 } 1907 } 1908 if (!kthread_should_stop() && stall_cpu > 0) { 1909 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall"); 1910 stop_at = ktime_get_seconds() + stall_cpu; 1911 /* RCU CPU stall is expected behavior in following code. */ 1912 idx = cur_ops->readlock(); 1913 if (stall_cpu_irqsoff) 1914 local_irq_disable(); 1915 else if (!stall_cpu_block) 1916 preempt_disable(); 1917 pr_alert("rcu_torture_stall start on CPU %d.\n", 1918 raw_smp_processor_id()); 1919 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), 1920 stop_at)) 1921 if (stall_cpu_block) 1922 schedule_timeout_uninterruptible(HZ); 1923 if (stall_cpu_irqsoff) 1924 local_irq_enable(); 1925 else if (!stall_cpu_block) 1926 preempt_enable(); 1927 cur_ops->readunlock(idx); 1928 } 1929 pr_alert("rcu_torture_stall end.\n"); 1930 torture_shutdown_absorb("rcu_torture_stall"); 1931 while (!kthread_should_stop()) 1932 schedule_timeout_interruptible(10 * HZ); 1933 return 0; 1934 } 1935 1936 /* Spawn CPU-stall kthread, if stall_cpu specified. */ 1937 static int __init rcu_torture_stall_init(void) 1938 { 1939 if (stall_cpu <= 0 && stall_gp_kthread <= 0) 1940 return 0; 1941 return torture_create_kthread(rcu_torture_stall, NULL, stall_task); 1942 } 1943 1944 /* State structure for forward-progress self-propagating RCU callback. */ 1945 struct fwd_cb_state { 1946 struct rcu_head rh; 1947 int stop; 1948 }; 1949 1950 /* 1951 * Forward-progress self-propagating RCU callback function. Because 1952 * callbacks run from softirq, this function is an implicit RCU read-side 1953 * critical section. 1954 */ 1955 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp) 1956 { 1957 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh); 1958 1959 if (READ_ONCE(fcsp->stop)) { 1960 WRITE_ONCE(fcsp->stop, 2); 1961 return; 1962 } 1963 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); 1964 } 1965 1966 /* State for continuous-flood RCU callbacks. */ 1967 struct rcu_fwd_cb { 1968 struct rcu_head rh; 1969 struct rcu_fwd_cb *rfc_next; 1970 struct rcu_fwd *rfc_rfp; 1971 int rfc_gps; 1972 }; 1973 1974 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ 1975 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ 1976 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ 1977 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ 1978 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) 1979 1980 struct rcu_launder_hist { 1981 long n_launders; 1982 unsigned long launder_gp_seq; 1983 }; 1984 1985 struct rcu_fwd { 1986 spinlock_t rcu_fwd_lock; 1987 struct rcu_fwd_cb *rcu_fwd_cb_head; 1988 struct rcu_fwd_cb **rcu_fwd_cb_tail; 1989 long n_launders_cb; 1990 unsigned long rcu_fwd_startat; 1991 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; 1992 unsigned long rcu_launder_gp_seq_start; 1993 }; 1994 1995 static DEFINE_MUTEX(rcu_fwd_mutex); 1996 static struct rcu_fwd *rcu_fwds; 1997 static bool rcu_fwd_emergency_stop; 1998 1999 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) 2000 { 2001 unsigned long gps; 2002 unsigned long gps_old; 2003 int i; 2004 int j; 2005 2006 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) 2007 if (rfp->n_launders_hist[i].n_launders > 0) 2008 break; 2009 pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", 2010 __func__, jiffies - rfp->rcu_fwd_startat); 2011 gps_old = rfp->rcu_launder_gp_seq_start; 2012 for (j = 0; j <= i; j++) { 2013 gps = rfp->n_launders_hist[j].launder_gp_seq; 2014 pr_cont(" %ds/%d: %ld:%ld", 2015 j + 1, FWD_CBS_HIST_DIV, 2016 rfp->n_launders_hist[j].n_launders, 2017 rcutorture_seq_diff(gps, gps_old)); 2018 gps_old = gps; 2019 } 2020 pr_cont("\n"); 2021 } 2022 2023 /* Callback function for continuous-flood RCU callbacks. */ 2024 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) 2025 { 2026 unsigned long flags; 2027 int i; 2028 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); 2029 struct rcu_fwd_cb **rfcpp; 2030 struct rcu_fwd *rfp = rfcp->rfc_rfp; 2031 2032 rfcp->rfc_next = NULL; 2033 rfcp->rfc_gps++; 2034 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2035 rfcpp = rfp->rcu_fwd_cb_tail; 2036 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; 2037 WRITE_ONCE(*rfcpp, rfcp); 2038 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); 2039 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); 2040 if (i >= ARRAY_SIZE(rfp->n_launders_hist)) 2041 i = ARRAY_SIZE(rfp->n_launders_hist) - 1; 2042 rfp->n_launders_hist[i].n_launders++; 2043 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); 2044 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2045 } 2046 2047 // Give the scheduler a chance, even on nohz_full CPUs. 2048 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) 2049 { 2050 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { 2051 // Real call_rcu() floods hit userspace, so emulate that. 2052 if (need_resched() || (iter & 0xfff)) 2053 schedule(); 2054 return; 2055 } 2056 // No userspace emulation: CB invocation throttles call_rcu() 2057 cond_resched(); 2058 } 2059 2060 /* 2061 * Free all callbacks on the rcu_fwd_cb_head list, either because the 2062 * test is over or because we hit an OOM event. 2063 */ 2064 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) 2065 { 2066 unsigned long flags; 2067 unsigned long freed = 0; 2068 struct rcu_fwd_cb *rfcp; 2069 2070 for (;;) { 2071 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); 2072 rfcp = rfp->rcu_fwd_cb_head; 2073 if (!rfcp) { 2074 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2075 break; 2076 } 2077 rfp->rcu_fwd_cb_head = rfcp->rfc_next; 2078 if (!rfp->rcu_fwd_cb_head) 2079 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2080 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); 2081 kfree(rfcp); 2082 freed++; 2083 rcu_torture_fwd_prog_cond_resched(freed); 2084 if (tick_nohz_full_enabled()) { 2085 local_irq_save(flags); 2086 rcu_momentary_dyntick_idle(); 2087 local_irq_restore(flags); 2088 } 2089 } 2090 return freed; 2091 } 2092 2093 /* Carry out need_resched()/cond_resched() forward-progress testing. */ 2094 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, 2095 int *tested, int *tested_tries) 2096 { 2097 unsigned long cver; 2098 unsigned long dur; 2099 struct fwd_cb_state fcs; 2100 unsigned long gps; 2101 int idx; 2102 int sd; 2103 int sd4; 2104 bool selfpropcb = false; 2105 unsigned long stopat; 2106 static DEFINE_TORTURE_RANDOM(trs); 2107 2108 if (!cur_ops->sync) 2109 return; // Cannot do need_resched() forward progress testing without ->sync. 2110 if (cur_ops->call && cur_ops->cb_barrier) { 2111 init_rcu_head_on_stack(&fcs.rh); 2112 selfpropcb = true; 2113 } 2114 2115 /* Tight loop containing cond_resched(). */ 2116 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 2117 cur_ops->sync(); /* Later readers see above write. */ 2118 if (selfpropcb) { 2119 WRITE_ONCE(fcs.stop, 0); 2120 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); 2121 } 2122 cver = READ_ONCE(rcu_torture_current_version); 2123 gps = cur_ops->get_gp_seq(); 2124 sd = cur_ops->stall_dur() + 1; 2125 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; 2126 dur = sd4 + torture_random(&trs) % (sd - sd4); 2127 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2128 stopat = rfp->rcu_fwd_startat + dur; 2129 while (time_before(jiffies, stopat) && 2130 !shutdown_time_arrived() && 2131 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2132 idx = cur_ops->readlock(); 2133 udelay(10); 2134 cur_ops->readunlock(idx); 2135 if (!fwd_progress_need_resched || need_resched()) 2136 cond_resched(); 2137 } 2138 (*tested_tries)++; 2139 if (!time_before(jiffies, stopat) && 2140 !shutdown_time_arrived() && 2141 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2142 (*tested)++; 2143 cver = READ_ONCE(rcu_torture_current_version) - cver; 2144 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2145 WARN_ON(!cver && gps < 2); 2146 pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); 2147 } 2148 if (selfpropcb) { 2149 WRITE_ONCE(fcs.stop, 1); 2150 cur_ops->sync(); /* Wait for running CB to complete. */ 2151 cur_ops->cb_barrier(); /* Wait for queued callbacks. */ 2152 } 2153 2154 if (selfpropcb) { 2155 WARN_ON(READ_ONCE(fcs.stop) != 2); 2156 destroy_rcu_head_on_stack(&fcs.rh); 2157 } 2158 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ 2159 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2160 } 2161 2162 /* Carry out call_rcu() forward-progress testing. */ 2163 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) 2164 { 2165 unsigned long cver; 2166 unsigned long flags; 2167 unsigned long gps; 2168 int i; 2169 long n_launders; 2170 long n_launders_cb_snap; 2171 long n_launders_sa; 2172 long n_max_cbs; 2173 long n_max_gps; 2174 struct rcu_fwd_cb *rfcp; 2175 struct rcu_fwd_cb *rfcpn; 2176 unsigned long stopat; 2177 unsigned long stoppedat; 2178 2179 if (READ_ONCE(rcu_fwd_emergency_stop)) 2180 return; /* Get out of the way quickly, no GP wait! */ 2181 if (!cur_ops->call) 2182 return; /* Can't do call_rcu() fwd prog without ->call. */ 2183 2184 /* Loop continuously posting RCU callbacks. */ 2185 WRITE_ONCE(rcu_fwd_cb_nodelay, true); 2186 cur_ops->sync(); /* Later readers see above write. */ 2187 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); 2188 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; 2189 n_launders = 0; 2190 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread 2191 n_launders_sa = 0; 2192 n_max_cbs = 0; 2193 n_max_gps = 0; 2194 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) 2195 rfp->n_launders_hist[i].n_launders = 0; 2196 cver = READ_ONCE(rcu_torture_current_version); 2197 gps = cur_ops->get_gp_seq(); 2198 rfp->rcu_launder_gp_seq_start = gps; 2199 tick_dep_set_task(current, TICK_DEP_BIT_RCU); 2200 while (time_before(jiffies, stopat) && 2201 !shutdown_time_arrived() && 2202 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { 2203 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); 2204 rfcpn = NULL; 2205 if (rfcp) 2206 rfcpn = READ_ONCE(rfcp->rfc_next); 2207 if (rfcpn) { 2208 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && 2209 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) 2210 break; 2211 rfp->rcu_fwd_cb_head = rfcpn; 2212 n_launders++; 2213 n_launders_sa++; 2214 } else { 2215 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); 2216 if (WARN_ON_ONCE(!rfcp)) { 2217 schedule_timeout_interruptible(1); 2218 continue; 2219 } 2220 n_max_cbs++; 2221 n_launders_sa = 0; 2222 rfcp->rfc_gps = 0; 2223 rfcp->rfc_rfp = rfp; 2224 } 2225 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); 2226 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); 2227 if (tick_nohz_full_enabled()) { 2228 local_irq_save(flags); 2229 rcu_momentary_dyntick_idle(); 2230 local_irq_restore(flags); 2231 } 2232 } 2233 stoppedat = jiffies; 2234 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); 2235 cver = READ_ONCE(rcu_torture_current_version) - cver; 2236 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); 2237 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ 2238 (void)rcu_torture_fwd_prog_cbfree(rfp); 2239 2240 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && 2241 !shutdown_time_arrived()) { 2242 WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); 2243 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", 2244 __func__, 2245 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, 2246 n_launders + n_max_cbs - n_launders_cb_snap, 2247 n_launders, n_launders_sa, 2248 n_max_gps, n_max_cbs, cver, gps); 2249 rcu_torture_fwd_cb_hist(rfp); 2250 } 2251 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ 2252 tick_dep_clear_task(current, TICK_DEP_BIT_RCU); 2253 WRITE_ONCE(rcu_fwd_cb_nodelay, false); 2254 } 2255 2256 2257 /* 2258 * OOM notifier, but this only prints diagnostic information for the 2259 * current forward-progress test. 2260 */ 2261 static int rcutorture_oom_notify(struct notifier_block *self, 2262 unsigned long notused, void *nfreed) 2263 { 2264 struct rcu_fwd *rfp; 2265 2266 mutex_lock(&rcu_fwd_mutex); 2267 rfp = rcu_fwds; 2268 if (!rfp) { 2269 mutex_unlock(&rcu_fwd_mutex); 2270 return NOTIFY_OK; 2271 } 2272 WARN(1, "%s invoked upon OOM during forward-progress testing.\n", 2273 __func__); 2274 rcu_torture_fwd_cb_hist(rfp); 2275 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); 2276 WRITE_ONCE(rcu_fwd_emergency_stop, true); 2277 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ 2278 pr_info("%s: Freed %lu RCU callbacks.\n", 2279 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2280 rcu_barrier(); 2281 pr_info("%s: Freed %lu RCU callbacks.\n", 2282 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2283 rcu_barrier(); 2284 pr_info("%s: Freed %lu RCU callbacks.\n", 2285 __func__, rcu_torture_fwd_prog_cbfree(rfp)); 2286 smp_mb(); /* Frees before return to avoid redoing OOM. */ 2287 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ 2288 pr_info("%s returning after OOM processing.\n", __func__); 2289 mutex_unlock(&rcu_fwd_mutex); 2290 return NOTIFY_OK; 2291 } 2292 2293 static struct notifier_block rcutorture_oom_nb = { 2294 .notifier_call = rcutorture_oom_notify 2295 }; 2296 2297 /* Carry out grace-period forward-progress testing. */ 2298 static int rcu_torture_fwd_prog(void *args) 2299 { 2300 int oldnice = task_nice(current); 2301 struct rcu_fwd *rfp = args; 2302 int tested = 0; 2303 int tested_tries = 0; 2304 2305 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); 2306 rcu_bind_current_to_nocb(); 2307 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) 2308 set_user_nice(current, MAX_NICE); 2309 do { 2310 schedule_timeout_interruptible(fwd_progress_holdoff * HZ); 2311 WRITE_ONCE(rcu_fwd_emergency_stop, false); 2312 if (!IS_ENABLED(CONFIG_TINY_RCU) || 2313 rcu_inkernel_boot_has_ended()) 2314 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); 2315 if (rcu_inkernel_boot_has_ended()) 2316 rcu_torture_fwd_prog_cr(rfp); 2317 2318 /* Avoid slow periods, better to test when busy. */ 2319 if (stutter_wait("rcu_torture_fwd_prog")) 2320 sched_set_normal(current, oldnice); 2321 } while (!torture_must_stop()); 2322 /* Short runs might not contain a valid forward-progress attempt. */ 2323 WARN_ON(!tested && tested_tries >= 5); 2324 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); 2325 torture_kthread_stopping("rcu_torture_fwd_prog"); 2326 return 0; 2327 } 2328 2329 /* If forward-progress checking is requested and feasible, spawn the thread. */ 2330 static int __init rcu_torture_fwd_prog_init(void) 2331 { 2332 struct rcu_fwd *rfp; 2333 2334 if (!fwd_progress) 2335 return 0; /* Not requested, so don't do it. */ 2336 if ((!cur_ops->sync && !cur_ops->call) || 2337 !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) { 2338 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); 2339 return 0; 2340 } 2341 if (stall_cpu > 0) { 2342 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); 2343 if (IS_MODULE(CONFIG_RCU_TORTURE_TESTS)) 2344 return -EINVAL; /* In module, can fail back to user. */ 2345 WARN_ON(1); /* Make sure rcutorture notices conflict. */ 2346 return 0; 2347 } 2348 if (fwd_progress_holdoff <= 0) 2349 fwd_progress_holdoff = 1; 2350 if (fwd_progress_div <= 0) 2351 fwd_progress_div = 4; 2352 rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); 2353 if (!rfp) 2354 return -ENOMEM; 2355 spin_lock_init(&rfp->rcu_fwd_lock); 2356 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; 2357 mutex_lock(&rcu_fwd_mutex); 2358 rcu_fwds = rfp; 2359 mutex_unlock(&rcu_fwd_mutex); 2360 register_oom_notifier(&rcutorture_oom_nb); 2361 return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); 2362 } 2363 2364 static void rcu_torture_fwd_prog_cleanup(void) 2365 { 2366 struct rcu_fwd *rfp; 2367 2368 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); 2369 rfp = rcu_fwds; 2370 mutex_lock(&rcu_fwd_mutex); 2371 rcu_fwds = NULL; 2372 mutex_unlock(&rcu_fwd_mutex); 2373 unregister_oom_notifier(&rcutorture_oom_nb); 2374 kfree(rfp); 2375 } 2376 2377 /* Callback function for RCU barrier testing. */ 2378 static void rcu_torture_barrier_cbf(struct rcu_head *rcu) 2379 { 2380 atomic_inc(&barrier_cbs_invoked); 2381 } 2382 2383 /* IPI handler to get callback posted on desired CPU, if online. */ 2384 static void rcu_torture_barrier1cb(void *rcu_void) 2385 { 2386 struct rcu_head *rhp = rcu_void; 2387 2388 cur_ops->call(rhp, rcu_torture_barrier_cbf); 2389 } 2390 2391 /* kthread function to register callbacks used to test RCU barriers. */ 2392 static int rcu_torture_barrier_cbs(void *arg) 2393 { 2394 long myid = (long)arg; 2395 bool lastphase = false; 2396 bool newphase; 2397 struct rcu_head rcu; 2398 2399 init_rcu_head_on_stack(&rcu); 2400 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); 2401 set_user_nice(current, MAX_NICE); 2402 do { 2403 wait_event(barrier_cbs_wq[myid], 2404 (newphase = 2405 smp_load_acquire(&barrier_phase)) != lastphase || 2406 torture_must_stop()); 2407 lastphase = newphase; 2408 if (torture_must_stop()) 2409 break; 2410 /* 2411 * The above smp_load_acquire() ensures barrier_phase load 2412 * is ordered before the following ->call(). 2413 */ 2414 if (smp_call_function_single(myid, rcu_torture_barrier1cb, 2415 &rcu, 1)) { 2416 // IPI failed, so use direct call from current CPU. 2417 cur_ops->call(&rcu, rcu_torture_barrier_cbf); 2418 } 2419 if (atomic_dec_and_test(&barrier_cbs_count)) 2420 wake_up(&barrier_wq); 2421 } while (!torture_must_stop()); 2422 if (cur_ops->cb_barrier != NULL) 2423 cur_ops->cb_barrier(); 2424 destroy_rcu_head_on_stack(&rcu); 2425 torture_kthread_stopping("rcu_torture_barrier_cbs"); 2426 return 0; 2427 } 2428 2429 /* kthread function to drive and coordinate RCU barrier testing. */ 2430 static int rcu_torture_barrier(void *arg) 2431 { 2432 int i; 2433 2434 VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); 2435 do { 2436 atomic_set(&barrier_cbs_invoked, 0); 2437 atomic_set(&barrier_cbs_count, n_barrier_cbs); 2438 /* Ensure barrier_phase ordered after prior assignments. */ 2439 smp_store_release(&barrier_phase, !barrier_phase); 2440 for (i = 0; i < n_barrier_cbs; i++) 2441 wake_up(&barrier_cbs_wq[i]); 2442 wait_event(barrier_wq, 2443 atomic_read(&barrier_cbs_count) == 0 || 2444 torture_must_stop()); 2445 if (torture_must_stop()) 2446 break; 2447 n_barrier_attempts++; 2448 cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ 2449 if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) { 2450 n_rcu_torture_barrier_error++; 2451 pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", 2452 atomic_read(&barrier_cbs_invoked), 2453 n_barrier_cbs); 2454 WARN_ON(1); 2455 // Wait manually for the remaining callbacks 2456 i = 0; 2457 do { 2458 if (WARN_ON(i++ > HZ)) 2459 i = INT_MIN; 2460 schedule_timeout_interruptible(1); 2461 cur_ops->cb_barrier(); 2462 } while (atomic_read(&barrier_cbs_invoked) != 2463 n_barrier_cbs && 2464 !torture_must_stop()); 2465 smp_mb(); // Can't trust ordering if broken. 2466 if (!torture_must_stop()) 2467 pr_err("Recovered: barrier_cbs_invoked = %d\n", 2468 atomic_read(&barrier_cbs_invoked)); 2469 } else { 2470 n_barrier_successes++; 2471 } 2472 schedule_timeout_interruptible(HZ / 10); 2473 } while (!torture_must_stop()); 2474 torture_kthread_stopping("rcu_torture_barrier"); 2475 return 0; 2476 } 2477 2478 /* Initialize RCU barrier testing. */ 2479 static int rcu_torture_barrier_init(void) 2480 { 2481 int i; 2482 int ret; 2483 2484 if (n_barrier_cbs <= 0) 2485 return 0; 2486 if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { 2487 pr_alert("%s" TORTURE_FLAG 2488 " Call or barrier ops missing for %s,\n", 2489 torture_type, cur_ops->name); 2490 pr_alert("%s" TORTURE_FLAG 2491 " RCU barrier testing omitted from run.\n", 2492 torture_type); 2493 return 0; 2494 } 2495 atomic_set(&barrier_cbs_count, 0); 2496 atomic_set(&barrier_cbs_invoked, 0); 2497 barrier_cbs_tasks = 2498 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]), 2499 GFP_KERNEL); 2500 barrier_cbs_wq = 2501 kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL); 2502 if (barrier_cbs_tasks == NULL || !barrier_cbs_wq) 2503 return -ENOMEM; 2504 for (i = 0; i < n_barrier_cbs; i++) { 2505 init_waitqueue_head(&barrier_cbs_wq[i]); 2506 ret = torture_create_kthread(rcu_torture_barrier_cbs, 2507 (void *)(long)i, 2508 barrier_cbs_tasks[i]); 2509 if (ret) 2510 return ret; 2511 } 2512 return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); 2513 } 2514 2515 /* Clean up after RCU barrier testing. */ 2516 static void rcu_torture_barrier_cleanup(void) 2517 { 2518 int i; 2519 2520 torture_stop_kthread(rcu_torture_barrier, barrier_task); 2521 if (barrier_cbs_tasks != NULL) { 2522 for (i = 0; i < n_barrier_cbs; i++) 2523 torture_stop_kthread(rcu_torture_barrier_cbs, 2524 barrier_cbs_tasks[i]); 2525 kfree(barrier_cbs_tasks); 2526 barrier_cbs_tasks = NULL; 2527 } 2528 if (barrier_cbs_wq != NULL) { 2529 kfree(barrier_cbs_wq); 2530 barrier_cbs_wq = NULL; 2531 } 2532 } 2533 2534 static bool rcu_torture_can_boost(void) 2535 { 2536 static int boost_warn_once; 2537 int prio; 2538 2539 if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2) 2540 return false; 2541 2542 prio = rcu_get_gp_kthreads_prio(); 2543 if (!prio) 2544 return false; 2545 2546 if (prio < 2) { 2547 if (boost_warn_once == 1) 2548 return false; 2549 2550 pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME); 2551 boost_warn_once = 1; 2552 return false; 2553 } 2554 2555 return true; 2556 } 2557 2558 static bool read_exit_child_stop; 2559 static bool read_exit_child_stopped; 2560 static wait_queue_head_t read_exit_wq; 2561 2562 // Child kthread which just does an rcutorture reader and exits. 2563 static int rcu_torture_read_exit_child(void *trsp_in) 2564 { 2565 struct torture_random_state *trsp = trsp_in; 2566 2567 set_user_nice(current, MAX_NICE); 2568 // Minimize time between reading and exiting. 2569 while (!kthread_should_stop()) 2570 schedule_timeout_uninterruptible(1); 2571 (void)rcu_torture_one_read(trsp, -1); 2572 return 0; 2573 } 2574 2575 // Parent kthread which creates and destroys read-exit child kthreads. 2576 static int rcu_torture_read_exit(void *unused) 2577 { 2578 int count = 0; 2579 bool errexit = false; 2580 int i; 2581 struct task_struct *tsp; 2582 DEFINE_TORTURE_RANDOM(trs); 2583 2584 // Allocate and initialize. 2585 set_user_nice(current, MAX_NICE); 2586 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); 2587 2588 // Each pass through this loop does one read-exit episode. 2589 do { 2590 if (++count > read_exit_burst) { 2591 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); 2592 rcu_barrier(); // Wait for task_struct free, avoid OOM. 2593 for (i = 0; i < read_exit_delay; i++) { 2594 schedule_timeout_uninterruptible(HZ); 2595 if (READ_ONCE(read_exit_child_stop)) 2596 break; 2597 } 2598 if (!READ_ONCE(read_exit_child_stop)) 2599 VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); 2600 count = 0; 2601 } 2602 if (READ_ONCE(read_exit_child_stop)) 2603 break; 2604 // Spawn child. 2605 tsp = kthread_run(rcu_torture_read_exit_child, 2606 &trs, "%s", 2607 "rcu_torture_read_exit_child"); 2608 if (IS_ERR(tsp)) { 2609 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2610 errexit = true; 2611 tsp = NULL; 2612 break; 2613 } 2614 cond_resched(); 2615 kthread_stop(tsp); 2616 n_read_exits ++; 2617 stutter_wait("rcu_torture_read_exit"); 2618 } while (!errexit && !READ_ONCE(read_exit_child_stop)); 2619 2620 // Clean up and exit. 2621 smp_store_release(&read_exit_child_stopped, true); // After reaping. 2622 smp_mb(); // Store before wakeup. 2623 wake_up(&read_exit_wq); 2624 while (!torture_must_stop()) 2625 schedule_timeout_uninterruptible(1); 2626 torture_kthread_stopping("rcu_torture_read_exit"); 2627 return 0; 2628 } 2629 2630 static int rcu_torture_read_exit_init(void) 2631 { 2632 if (read_exit_burst <= 0) 2633 return -EINVAL; 2634 init_waitqueue_head(&read_exit_wq); 2635 read_exit_child_stop = false; 2636 read_exit_child_stopped = false; 2637 return torture_create_kthread(rcu_torture_read_exit, NULL, 2638 read_exit_task); 2639 } 2640 2641 static void rcu_torture_read_exit_cleanup(void) 2642 { 2643 if (!read_exit_task) 2644 return; 2645 WRITE_ONCE(read_exit_child_stop, true); 2646 smp_mb(); // Above write before wait. 2647 wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); 2648 torture_stop_kthread(rcutorture_read_exit, read_exit_task); 2649 } 2650 2651 static enum cpuhp_state rcutor_hp; 2652 2653 static void 2654 rcu_torture_cleanup(void) 2655 { 2656 int firsttime; 2657 int flags = 0; 2658 unsigned long gp_seq = 0; 2659 int i; 2660 2661 if (torture_cleanup_begin()) { 2662 if (cur_ops->cb_barrier != NULL) 2663 cur_ops->cb_barrier(); 2664 return; 2665 } 2666 if (!cur_ops) { 2667 torture_cleanup_end(); 2668 return; 2669 } 2670 2671 if (cur_ops->gp_kthread_dbg) 2672 cur_ops->gp_kthread_dbg(); 2673 rcu_torture_read_exit_cleanup(); 2674 rcu_torture_barrier_cleanup(); 2675 rcu_torture_fwd_prog_cleanup(); 2676 torture_stop_kthread(rcu_torture_stall, stall_task); 2677 torture_stop_kthread(rcu_torture_writer, writer_task); 2678 2679 if (reader_tasks) { 2680 for (i = 0; i < nrealreaders; i++) 2681 torture_stop_kthread(rcu_torture_reader, 2682 reader_tasks[i]); 2683 kfree(reader_tasks); 2684 reader_tasks = NULL; 2685 } 2686 kfree(rcu_torture_reader_mbchk); 2687 rcu_torture_reader_mbchk = NULL; 2688 2689 if (fakewriter_tasks) { 2690 for (i = 0; i < nfakewriters; i++) 2691 torture_stop_kthread(rcu_torture_fakewriter, 2692 fakewriter_tasks[i]); 2693 kfree(fakewriter_tasks); 2694 fakewriter_tasks = NULL; 2695 } 2696 2697 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2698 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2699 pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", 2700 cur_ops->name, (long)gp_seq, flags, 2701 rcutorture_seq_diff(gp_seq, start_gp_seq)); 2702 torture_stop_kthread(rcu_torture_stats, stats_task); 2703 torture_stop_kthread(rcu_torture_fqs, fqs_task); 2704 if (rcu_torture_can_boost()) 2705 cpuhp_remove_state(rcutor_hp); 2706 2707 /* 2708 * Wait for all RCU callbacks to fire, then do torture-type-specific 2709 * cleanup operations. 2710 */ 2711 if (cur_ops->cb_barrier != NULL) 2712 cur_ops->cb_barrier(); 2713 if (cur_ops->cleanup != NULL) 2714 cur_ops->cleanup(); 2715 2716 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 2717 2718 if (err_segs_recorded) { 2719 pr_alert("Failure/close-call rcutorture reader segments:\n"); 2720 if (rt_read_nsegs == 0) 2721 pr_alert("\t: No segments recorded!!!\n"); 2722 firsttime = 1; 2723 for (i = 0; i < rt_read_nsegs; i++) { 2724 pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); 2725 if (err_segs[i].rt_delay_jiffies != 0) { 2726 pr_cont("%s%ldjiffies", firsttime ? "" : "+", 2727 err_segs[i].rt_delay_jiffies); 2728 firsttime = 0; 2729 } 2730 if (err_segs[i].rt_delay_ms != 0) { 2731 pr_cont("%s%ldms", firsttime ? "" : "+", 2732 err_segs[i].rt_delay_ms); 2733 firsttime = 0; 2734 } 2735 if (err_segs[i].rt_delay_us != 0) { 2736 pr_cont("%s%ldus", firsttime ? "" : "+", 2737 err_segs[i].rt_delay_us); 2738 firsttime = 0; 2739 } 2740 pr_cont("%s\n", 2741 err_segs[i].rt_preempted ? "preempted" : ""); 2742 2743 } 2744 } 2745 if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) 2746 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); 2747 else if (torture_onoff_failures()) 2748 rcu_torture_print_module_parms(cur_ops, 2749 "End of test: RCU_HOTPLUG"); 2750 else 2751 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS"); 2752 torture_cleanup_end(); 2753 } 2754 2755 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2756 static void rcu_torture_leak_cb(struct rcu_head *rhp) 2757 { 2758 } 2759 2760 static void rcu_torture_err_cb(struct rcu_head *rhp) 2761 { 2762 /* 2763 * This -might- happen due to race conditions, but is unlikely. 2764 * The scenario that leads to this happening is that the 2765 * first of the pair of duplicate callbacks is queued, 2766 * someone else starts a grace period that includes that 2767 * callback, then the second of the pair must wait for the 2768 * next grace period. Unlikely, but can happen. If it 2769 * does happen, the debug-objects subsystem won't have splatted. 2770 */ 2771 pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME); 2772 } 2773 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2774 2775 /* 2776 * Verify that double-free causes debug-objects to complain, but only 2777 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test 2778 * cannot be carried out. 2779 */ 2780 static void rcu_test_debug_objects(void) 2781 { 2782 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 2783 struct rcu_head rh1; 2784 struct rcu_head rh2; 2785 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL); 2786 2787 init_rcu_head_on_stack(&rh1); 2788 init_rcu_head_on_stack(&rh2); 2789 pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME); 2790 2791 /* Try to queue the rh2 pair of callbacks for the same grace period. */ 2792 preempt_disable(); /* Prevent preemption from interrupting test. */ 2793 rcu_read_lock(); /* Make it impossible to finish a grace period. */ 2794 call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */ 2795 local_irq_disable(); /* Make it harder to start a new grace period. */ 2796 call_rcu(&rh2, rcu_torture_leak_cb); 2797 call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */ 2798 if (rhp) { 2799 call_rcu(rhp, rcu_torture_leak_cb); 2800 call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */ 2801 } 2802 local_irq_enable(); 2803 rcu_read_unlock(); 2804 preempt_enable(); 2805 2806 /* Wait for them all to get done so we can safely return. */ 2807 rcu_barrier(); 2808 pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME); 2809 destroy_rcu_head_on_stack(&rh1); 2810 destroy_rcu_head_on_stack(&rh2); 2811 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2812 pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME); 2813 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 2814 } 2815 2816 static void rcutorture_sync(void) 2817 { 2818 static unsigned long n; 2819 2820 if (cur_ops->sync && !(++n & 0xfff)) 2821 cur_ops->sync(); 2822 } 2823 2824 static int __init 2825 rcu_torture_init(void) 2826 { 2827 long i; 2828 int cpu; 2829 int firsterr = 0; 2830 int flags = 0; 2831 unsigned long gp_seq = 0; 2832 static struct rcu_torture_ops *torture_ops[] = { 2833 &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, 2834 &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, 2835 &tasks_tracing_ops, &trivial_ops, 2836 }; 2837 2838 if (!torture_init_begin(torture_type, verbose)) 2839 return -EBUSY; 2840 2841 /* Process args and tell the world that the torturer is on the job. */ 2842 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 2843 cur_ops = torture_ops[i]; 2844 if (strcmp(torture_type, cur_ops->name) == 0) 2845 break; 2846 } 2847 if (i == ARRAY_SIZE(torture_ops)) { 2848 pr_alert("rcu-torture: invalid torture type: \"%s\"\n", 2849 torture_type); 2850 pr_alert("rcu-torture types:"); 2851 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 2852 pr_cont(" %s", torture_ops[i]->name); 2853 pr_cont("\n"); 2854 firsterr = -EINVAL; 2855 cur_ops = NULL; 2856 goto unwind; 2857 } 2858 if (cur_ops->fqs == NULL && fqs_duration != 0) { 2859 pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); 2860 fqs_duration = 0; 2861 } 2862 if (cur_ops->init) 2863 cur_ops->init(); 2864 2865 if (nreaders >= 0) { 2866 nrealreaders = nreaders; 2867 } else { 2868 nrealreaders = num_online_cpus() - 2 - nreaders; 2869 if (nrealreaders <= 0) 2870 nrealreaders = 1; 2871 } 2872 rcu_torture_print_module_parms(cur_ops, "Start of test"); 2873 rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); 2874 srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); 2875 start_gp_seq = gp_seq; 2876 pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", 2877 cur_ops->name, (long)gp_seq, flags); 2878 2879 /* Set up the freelist. */ 2880 2881 INIT_LIST_HEAD(&rcu_torture_freelist); 2882 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) { 2883 rcu_tortures[i].rtort_mbtest = 0; 2884 list_add_tail(&rcu_tortures[i].rtort_free, 2885 &rcu_torture_freelist); 2886 } 2887 2888 /* Initialize the statistics so that each run gets its own numbers. */ 2889 2890 rcu_torture_current = NULL; 2891 rcu_torture_current_version = 0; 2892 atomic_set(&n_rcu_torture_alloc, 0); 2893 atomic_set(&n_rcu_torture_alloc_fail, 0); 2894 atomic_set(&n_rcu_torture_free, 0); 2895 atomic_set(&n_rcu_torture_mberror, 0); 2896 atomic_set(&n_rcu_torture_mbchk_fail, 0); 2897 atomic_set(&n_rcu_torture_mbchk_tries, 0); 2898 atomic_set(&n_rcu_torture_error, 0); 2899 n_rcu_torture_barrier_error = 0; 2900 n_rcu_torture_boost_ktrerror = 0; 2901 n_rcu_torture_boost_rterror = 0; 2902 n_rcu_torture_boost_failure = 0; 2903 n_rcu_torture_boosts = 0; 2904 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) 2905 atomic_set(&rcu_torture_wcount[i], 0); 2906 for_each_possible_cpu(cpu) { 2907 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) { 2908 per_cpu(rcu_torture_count, cpu)[i] = 0; 2909 per_cpu(rcu_torture_batch, cpu)[i] = 0; 2910 } 2911 } 2912 err_segs_recorded = 0; 2913 rt_read_nsegs = 0; 2914 2915 /* Start up the kthreads. */ 2916 2917 rcu_torture_write_types(); 2918 firsterr = torture_create_kthread(rcu_torture_writer, NULL, 2919 writer_task); 2920 if (firsterr) 2921 goto unwind; 2922 if (nfakewriters > 0) { 2923 fakewriter_tasks = kcalloc(nfakewriters, 2924 sizeof(fakewriter_tasks[0]), 2925 GFP_KERNEL); 2926 if (fakewriter_tasks == NULL) { 2927 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2928 firsterr = -ENOMEM; 2929 goto unwind; 2930 } 2931 } 2932 for (i = 0; i < nfakewriters; i++) { 2933 firsterr = torture_create_kthread(rcu_torture_fakewriter, 2934 NULL, fakewriter_tasks[i]); 2935 if (firsterr) 2936 goto unwind; 2937 } 2938 reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]), 2939 GFP_KERNEL); 2940 rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk), 2941 GFP_KERNEL); 2942 if (!reader_tasks || !rcu_torture_reader_mbchk) { 2943 VERBOSE_TOROUT_ERRSTRING("out of memory"); 2944 firsterr = -ENOMEM; 2945 goto unwind; 2946 } 2947 for (i = 0; i < nrealreaders; i++) { 2948 rcu_torture_reader_mbchk[i].rtc_chkrdr = -1; 2949 firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, 2950 reader_tasks[i]); 2951 if (firsterr) 2952 goto unwind; 2953 } 2954 if (stat_interval > 0) { 2955 firsterr = torture_create_kthread(rcu_torture_stats, NULL, 2956 stats_task); 2957 if (firsterr) 2958 goto unwind; 2959 } 2960 if (test_no_idle_hz && shuffle_interval > 0) { 2961 firsterr = torture_shuffle_init(shuffle_interval * HZ); 2962 if (firsterr) 2963 goto unwind; 2964 } 2965 if (stutter < 0) 2966 stutter = 0; 2967 if (stutter) { 2968 int t; 2969 2970 t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; 2971 firsterr = torture_stutter_init(stutter * HZ, t); 2972 if (firsterr) 2973 goto unwind; 2974 } 2975 if (fqs_duration < 0) 2976 fqs_duration = 0; 2977 if (fqs_duration) { 2978 /* Create the fqs thread */ 2979 firsterr = torture_create_kthread(rcu_torture_fqs, NULL, 2980 fqs_task); 2981 if (firsterr) 2982 goto unwind; 2983 } 2984 if (test_boost_interval < 1) 2985 test_boost_interval = 1; 2986 if (test_boost_duration < 2) 2987 test_boost_duration = 2; 2988 if (rcu_torture_can_boost()) { 2989 2990 boost_starttime = jiffies + test_boost_interval * HZ; 2991 2992 firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE", 2993 rcutorture_booster_init, 2994 rcutorture_booster_cleanup); 2995 if (firsterr < 0) 2996 goto unwind; 2997 rcutor_hp = firsterr; 2998 } 2999 shutdown_jiffies = jiffies + shutdown_secs * HZ; 3000 firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); 3001 if (firsterr) 3002 goto unwind; 3003 firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, 3004 rcutorture_sync); 3005 if (firsterr) 3006 goto unwind; 3007 firsterr = rcu_torture_stall_init(); 3008 if (firsterr) 3009 goto unwind; 3010 firsterr = rcu_torture_fwd_prog_init(); 3011 if (firsterr) 3012 goto unwind; 3013 firsterr = rcu_torture_barrier_init(); 3014 if (firsterr) 3015 goto unwind; 3016 firsterr = rcu_torture_read_exit_init(); 3017 if (firsterr) 3018 goto unwind; 3019 if (object_debug) 3020 rcu_test_debug_objects(); 3021 torture_init_end(); 3022 return 0; 3023 3024 unwind: 3025 torture_init_end(); 3026 rcu_torture_cleanup(); 3027 if (shutdown_secs) { 3028 WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); 3029 kernel_power_off(); 3030 } 3031 return firsterr; 3032 } 3033 3034 module_init(rcu_torture_init); 3035 module_exit(rcu_torture_cleanup); 3036