// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	      "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	      "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	      "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
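
/*
 * Example invocation, assuming this file is built as the locktorture
 * module and using only the parameters declared above:
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=8 stat_interval=15
 */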

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task so that
		 * the kthread can be stopped.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	/*
	 * Acquire the locks in list order.  If ww_mutex_lock() reports
	 * -EDEADLK, drop everything acquired so far, wait for the contended
	 * lock with ww_mutex_lock_slow(), move it to the front of the list,
	 * and then acquire the remaining locks.
	 */
	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock(tid);
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		WRITE_ONCE(last_lock_release, jiffies);
		cxt.cur_ops->writeunlock(tid);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops->exit()
	 * will be invoked below if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute readers and writers evenly,
			 * so that the total number of threads matches the
			 * writer-only default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);