1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/kthread.h> 27 #include <linux/sched/rt.h> 28 #include <linux/spinlock.h> 29 #include <linux/rwlock.h> 30 #include <linux/mutex.h> 31 #include <linux/rwsem.h> 32 #include <linux/smp.h> 33 #include <linux/interrupt.h> 34 #include <linux/sched.h> 35 #include <uapi/linux/sched/types.h> 36 #include <linux/rtmutex.h> 37 #include <linux/atomic.h> 38 #include <linux/moduleparam.h> 39 #include <linux/delay.h> 40 #include <linux/slab.h> 41 #include <linux/percpu-rwsem.h> 42 #include <linux/torture.h> 43 44 MODULE_LICENSE("GPL"); 45 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 46 47 torture_param(int, nwriters_stress, -1, 48 "Number of write-locking stress-test threads"); 49 torture_param(int, nreaders_stress, -1, 50 "Number of read-locking stress-test threads"); 51 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 52 torture_param(int, onoff_interval, 0, 53 "Time between CPU hotplugs (s), 0=disable"); 54 torture_param(int, shuffle_interval, 3, 55 "Number of jiffies between shuffles, 0=disable"); 56 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 57 torture_param(int, stat_interval, 60, 58 "Number of seconds between stats printk()s"); 59 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 60 torture_param(bool, verbose, true, 61 "Enable verbose debugging printk()s"); 62 63 static char *torture_type = "spin_lock"; 64 module_param(torture_type, charp, 0444); 65 MODULE_PARM_DESC(torture_type, 66 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 67 68 static struct task_struct *stats_task; 69 static struct task_struct **writer_tasks; 70 static struct task_struct **reader_tasks; 71 72 static bool lock_is_write_held; 73 static bool lock_is_read_held; 74 75 struct lock_stress_stats { 76 long n_lock_fail; 77 long n_lock_acquired; 78 }; 79 80 int torture_runnable = IS_ENABLED(MODULE); 81 module_param(torture_runnable, int, 0444); 82 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); 83 84 /* Forward reference. */ 85 static void lock_torture_cleanup(void); 86 87 /* 88 * Operations vector for selecting different types of tests. 89 */ 90 struct lock_torture_ops { 91 void (*init)(void); 92 int (*writelock)(void); 93 void (*write_delay)(struct torture_random_state *trsp); 94 void (*task_boost)(struct torture_random_state *trsp); 95 void (*writeunlock)(void); 96 int (*readlock)(void); 97 void (*read_delay)(struct torture_random_state *trsp); 98 void (*readunlock)(void); 99 100 unsigned long flags; /* for irq spinlocks */ 101 const char *name; 102 }; 103 104 struct lock_torture_cxt { 105 int nrealwriters_stress; 106 int nrealreaders_stress; 107 bool debug_lock; 108 atomic_t n_lock_torture_errors; 109 struct lock_torture_ops *cur_ops; 110 struct lock_stress_stats *lwsa; /* writer statistics */ 111 struct lock_stress_stats *lrsa; /* reader statistics */ 112 }; 113 static struct lock_torture_cxt cxt = { 0, 0, false, 114 ATOMIC_INIT(0), 115 NULL, NULL}; 116 /* 117 * Definitions for lock torture testing. 118 */ 119 120 static int torture_lock_busted_write_lock(void) 121 { 122 return 0; /* BUGGY, do not use in real life!!! */ 123 } 124 125 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 126 { 127 const unsigned long longdelay_ms = 100; 128 129 /* We want a long delay occasionally to force massive contention. */ 130 if (!(torture_random(trsp) % 131 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 132 mdelay(longdelay_ms); 133 #ifdef CONFIG_PREEMPT 134 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 135 preempt_schedule(); /* Allow test to be preempted. */ 136 #endif 137 } 138 139 static void torture_lock_busted_write_unlock(void) 140 { 141 /* BUGGY, do not use in real life!!! */ 142 } 143 144 static void torture_boost_dummy(struct torture_random_state *trsp) 145 { 146 /* Only rtmutexes care about priority */ 147 } 148 149 static struct lock_torture_ops lock_busted_ops = { 150 .writelock = torture_lock_busted_write_lock, 151 .write_delay = torture_lock_busted_write_delay, 152 .task_boost = torture_boost_dummy, 153 .writeunlock = torture_lock_busted_write_unlock, 154 .readlock = NULL, 155 .read_delay = NULL, 156 .readunlock = NULL, 157 .name = "lock_busted" 158 }; 159 160 static DEFINE_SPINLOCK(torture_spinlock); 161 162 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 163 { 164 spin_lock(&torture_spinlock); 165 return 0; 166 } 167 168 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 169 { 170 const unsigned long shortdelay_us = 2; 171 const unsigned long longdelay_ms = 100; 172 173 /* We want a short delay mostly to emulate likely code, and 174 * we want a long delay occasionally to force massive contention. 175 */ 176 if (!(torture_random(trsp) % 177 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 178 mdelay(longdelay_ms); 179 if (!(torture_random(trsp) % 180 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 181 udelay(shortdelay_us); 182 #ifdef CONFIG_PREEMPT 183 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 184 preempt_schedule(); /* Allow test to be preempted. */ 185 #endif 186 } 187 188 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 189 { 190 spin_unlock(&torture_spinlock); 191 } 192 193 static struct lock_torture_ops spin_lock_ops = { 194 .writelock = torture_spin_lock_write_lock, 195 .write_delay = torture_spin_lock_write_delay, 196 .task_boost = torture_boost_dummy, 197 .writeunlock = torture_spin_lock_write_unlock, 198 .readlock = NULL, 199 .read_delay = NULL, 200 .readunlock = NULL, 201 .name = "spin_lock" 202 }; 203 204 static int torture_spin_lock_write_lock_irq(void) 205 __acquires(torture_spinlock) 206 { 207 unsigned long flags; 208 209 spin_lock_irqsave(&torture_spinlock, flags); 210 cxt.cur_ops->flags = flags; 211 return 0; 212 } 213 214 static void torture_lock_spin_write_unlock_irq(void) 215 __releases(torture_spinlock) 216 { 217 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 218 } 219 220 static struct lock_torture_ops spin_lock_irq_ops = { 221 .writelock = torture_spin_lock_write_lock_irq, 222 .write_delay = torture_spin_lock_write_delay, 223 .task_boost = torture_boost_dummy, 224 .writeunlock = torture_lock_spin_write_unlock_irq, 225 .readlock = NULL, 226 .read_delay = NULL, 227 .readunlock = NULL, 228 .name = "spin_lock_irq" 229 }; 230 231 static DEFINE_RWLOCK(torture_rwlock); 232 233 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 234 { 235 write_lock(&torture_rwlock); 236 return 0; 237 } 238 239 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 240 { 241 const unsigned long shortdelay_us = 2; 242 const unsigned long longdelay_ms = 100; 243 244 /* We want a short delay mostly to emulate likely code, and 245 * we want a long delay occasionally to force massive contention. 246 */ 247 if (!(torture_random(trsp) % 248 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 249 mdelay(longdelay_ms); 250 else 251 udelay(shortdelay_us); 252 } 253 254 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 255 { 256 write_unlock(&torture_rwlock); 257 } 258 259 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 260 { 261 read_lock(&torture_rwlock); 262 return 0; 263 } 264 265 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 266 { 267 const unsigned long shortdelay_us = 10; 268 const unsigned long longdelay_ms = 100; 269 270 /* We want a short delay mostly to emulate likely code, and 271 * we want a long delay occasionally to force massive contention. 272 */ 273 if (!(torture_random(trsp) % 274 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 275 mdelay(longdelay_ms); 276 else 277 udelay(shortdelay_us); 278 } 279 280 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 281 { 282 read_unlock(&torture_rwlock); 283 } 284 285 static struct lock_torture_ops rw_lock_ops = { 286 .writelock = torture_rwlock_write_lock, 287 .write_delay = torture_rwlock_write_delay, 288 .task_boost = torture_boost_dummy, 289 .writeunlock = torture_rwlock_write_unlock, 290 .readlock = torture_rwlock_read_lock, 291 .read_delay = torture_rwlock_read_delay, 292 .readunlock = torture_rwlock_read_unlock, 293 .name = "rw_lock" 294 }; 295 296 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 297 { 298 unsigned long flags; 299 300 write_lock_irqsave(&torture_rwlock, flags); 301 cxt.cur_ops->flags = flags; 302 return 0; 303 } 304 305 static void torture_rwlock_write_unlock_irq(void) 306 __releases(torture_rwlock) 307 { 308 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 309 } 310 311 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 312 { 313 unsigned long flags; 314 315 read_lock_irqsave(&torture_rwlock, flags); 316 cxt.cur_ops->flags = flags; 317 return 0; 318 } 319 320 static void torture_rwlock_read_unlock_irq(void) 321 __releases(torture_rwlock) 322 { 323 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 324 } 325 326 static struct lock_torture_ops rw_lock_irq_ops = { 327 .writelock = torture_rwlock_write_lock_irq, 328 .write_delay = torture_rwlock_write_delay, 329 .task_boost = torture_boost_dummy, 330 .writeunlock = torture_rwlock_write_unlock_irq, 331 .readlock = torture_rwlock_read_lock_irq, 332 .read_delay = torture_rwlock_read_delay, 333 .readunlock = torture_rwlock_read_unlock_irq, 334 .name = "rw_lock_irq" 335 }; 336 337 static DEFINE_MUTEX(torture_mutex); 338 339 static int torture_mutex_lock(void) __acquires(torture_mutex) 340 { 341 mutex_lock(&torture_mutex); 342 return 0; 343 } 344 345 static void torture_mutex_delay(struct torture_random_state *trsp) 346 { 347 const unsigned long longdelay_ms = 100; 348 349 /* We want a long delay occasionally to force massive contention. */ 350 if (!(torture_random(trsp) % 351 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 352 mdelay(longdelay_ms * 5); 353 else 354 mdelay(longdelay_ms / 5); 355 #ifdef CONFIG_PREEMPT 356 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 357 preempt_schedule(); /* Allow test to be preempted. */ 358 #endif 359 } 360 361 static void torture_mutex_unlock(void) __releases(torture_mutex) 362 { 363 mutex_unlock(&torture_mutex); 364 } 365 366 static struct lock_torture_ops mutex_lock_ops = { 367 .writelock = torture_mutex_lock, 368 .write_delay = torture_mutex_delay, 369 .task_boost = torture_boost_dummy, 370 .writeunlock = torture_mutex_unlock, 371 .readlock = NULL, 372 .read_delay = NULL, 373 .readunlock = NULL, 374 .name = "mutex_lock" 375 }; 376 377 #include <linux/ww_mutex.h> 378 static DEFINE_WW_CLASS(torture_ww_class); 379 static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); 380 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); 381 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); 382 383 static int torture_ww_mutex_lock(void) 384 __acquires(torture_ww_mutex_0) 385 __acquires(torture_ww_mutex_1) 386 __acquires(torture_ww_mutex_2) 387 { 388 LIST_HEAD(list); 389 struct reorder_lock { 390 struct list_head link; 391 struct ww_mutex *lock; 392 } locks[3], *ll, *ln; 393 struct ww_acquire_ctx ctx; 394 395 locks[0].lock = &torture_ww_mutex_0; 396 list_add(&locks[0].link, &list); 397 398 locks[1].lock = &torture_ww_mutex_1; 399 list_add(&locks[1].link, &list); 400 401 locks[2].lock = &torture_ww_mutex_2; 402 list_add(&locks[2].link, &list); 403 404 ww_acquire_init(&ctx, &torture_ww_class); 405 406 list_for_each_entry(ll, &list, link) { 407 int err; 408 409 err = ww_mutex_lock(ll->lock, &ctx); 410 if (!err) 411 continue; 412 413 ln = ll; 414 list_for_each_entry_continue_reverse(ln, &list, link) 415 ww_mutex_unlock(ln->lock); 416 417 if (err != -EDEADLK) 418 return err; 419 420 ww_mutex_lock_slow(ll->lock, &ctx); 421 list_move(&ll->link, &list); 422 } 423 424 ww_acquire_fini(&ctx); 425 return 0; 426 } 427 428 static void torture_ww_mutex_unlock(void) 429 __releases(torture_ww_mutex_0) 430 __releases(torture_ww_mutex_1) 431 __releases(torture_ww_mutex_2) 432 { 433 ww_mutex_unlock(&torture_ww_mutex_0); 434 ww_mutex_unlock(&torture_ww_mutex_1); 435 ww_mutex_unlock(&torture_ww_mutex_2); 436 } 437 438 static struct lock_torture_ops ww_mutex_lock_ops = { 439 .writelock = torture_ww_mutex_lock, 440 .write_delay = torture_mutex_delay, 441 .task_boost = torture_boost_dummy, 442 .writeunlock = torture_ww_mutex_unlock, 443 .readlock = NULL, 444 .read_delay = NULL, 445 .readunlock = NULL, 446 .name = "ww_mutex_lock" 447 }; 448 449 #ifdef CONFIG_RT_MUTEXES 450 static DEFINE_RT_MUTEX(torture_rtmutex); 451 452 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 453 { 454 rt_mutex_lock(&torture_rtmutex); 455 return 0; 456 } 457 458 static void torture_rtmutex_boost(struct torture_random_state *trsp) 459 { 460 int policy; 461 struct sched_param param; 462 const unsigned int factor = 50000; /* yes, quite arbitrary */ 463 464 if (!rt_task(current)) { 465 /* 466 * Boost priority once every ~50k operations. When the 467 * task tries to take the lock, the rtmutex it will account 468 * for the new priority, and do any corresponding pi-dance. 469 */ 470 if (trsp && !(torture_random(trsp) % 471 (cxt.nrealwriters_stress * factor))) { 472 policy = SCHED_FIFO; 473 param.sched_priority = MAX_RT_PRIO - 1; 474 } else /* common case, do nothing */ 475 return; 476 } else { 477 /* 478 * The task will remain boosted for another ~500k operations, 479 * then restored back to its original prio, and so forth. 480 * 481 * When @trsp is nil, we want to force-reset the task for 482 * stopping the kthread. 483 */ 484 if (!trsp || !(torture_random(trsp) % 485 (cxt.nrealwriters_stress * factor * 2))) { 486 policy = SCHED_NORMAL; 487 param.sched_priority = 0; 488 } else /* common case, do nothing */ 489 return; 490 } 491 492 sched_setscheduler_nocheck(current, policy, ¶m); 493 } 494 495 static void torture_rtmutex_delay(struct torture_random_state *trsp) 496 { 497 const unsigned long shortdelay_us = 2; 498 const unsigned long longdelay_ms = 100; 499 500 /* 501 * We want a short delay mostly to emulate likely code, and 502 * we want a long delay occasionally to force massive contention. 503 */ 504 if (!(torture_random(trsp) % 505 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 506 mdelay(longdelay_ms); 507 if (!(torture_random(trsp) % 508 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 509 udelay(shortdelay_us); 510 #ifdef CONFIG_PREEMPT 511 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 512 preempt_schedule(); /* Allow test to be preempted. */ 513 #endif 514 } 515 516 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 517 { 518 rt_mutex_unlock(&torture_rtmutex); 519 } 520 521 static struct lock_torture_ops rtmutex_lock_ops = { 522 .writelock = torture_rtmutex_lock, 523 .write_delay = torture_rtmutex_delay, 524 .task_boost = torture_rtmutex_boost, 525 .writeunlock = torture_rtmutex_unlock, 526 .readlock = NULL, 527 .read_delay = NULL, 528 .readunlock = NULL, 529 .name = "rtmutex_lock" 530 }; 531 #endif 532 533 static DECLARE_RWSEM(torture_rwsem); 534 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 535 { 536 down_write(&torture_rwsem); 537 return 0; 538 } 539 540 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 541 { 542 const unsigned long longdelay_ms = 100; 543 544 /* We want a long delay occasionally to force massive contention. */ 545 if (!(torture_random(trsp) % 546 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 547 mdelay(longdelay_ms * 10); 548 else 549 mdelay(longdelay_ms / 10); 550 #ifdef CONFIG_PREEMPT 551 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 552 preempt_schedule(); /* Allow test to be preempted. */ 553 #endif 554 } 555 556 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 557 { 558 up_write(&torture_rwsem); 559 } 560 561 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 562 { 563 down_read(&torture_rwsem); 564 return 0; 565 } 566 567 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 568 { 569 const unsigned long longdelay_ms = 100; 570 571 /* We want a long delay occasionally to force massive contention. */ 572 if (!(torture_random(trsp) % 573 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 574 mdelay(longdelay_ms * 2); 575 else 576 mdelay(longdelay_ms / 2); 577 #ifdef CONFIG_PREEMPT 578 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 579 preempt_schedule(); /* Allow test to be preempted. */ 580 #endif 581 } 582 583 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 584 { 585 up_read(&torture_rwsem); 586 } 587 588 static struct lock_torture_ops rwsem_lock_ops = { 589 .writelock = torture_rwsem_down_write, 590 .write_delay = torture_rwsem_write_delay, 591 .task_boost = torture_boost_dummy, 592 .writeunlock = torture_rwsem_up_write, 593 .readlock = torture_rwsem_down_read, 594 .read_delay = torture_rwsem_read_delay, 595 .readunlock = torture_rwsem_up_read, 596 .name = "rwsem_lock" 597 }; 598 599 #include <linux/percpu-rwsem.h> 600 static struct percpu_rw_semaphore pcpu_rwsem; 601 602 void torture_percpu_rwsem_init(void) 603 { 604 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 605 } 606 607 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 608 { 609 percpu_down_write(&pcpu_rwsem); 610 return 0; 611 } 612 613 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 614 { 615 percpu_up_write(&pcpu_rwsem); 616 } 617 618 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 619 { 620 percpu_down_read(&pcpu_rwsem); 621 return 0; 622 } 623 624 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 625 { 626 percpu_up_read(&pcpu_rwsem); 627 } 628 629 static struct lock_torture_ops percpu_rwsem_lock_ops = { 630 .init = torture_percpu_rwsem_init, 631 .writelock = torture_percpu_rwsem_down_write, 632 .write_delay = torture_rwsem_write_delay, 633 .task_boost = torture_boost_dummy, 634 .writeunlock = torture_percpu_rwsem_up_write, 635 .readlock = torture_percpu_rwsem_down_read, 636 .read_delay = torture_rwsem_read_delay, 637 .readunlock = torture_percpu_rwsem_up_read, 638 .name = "percpu_rwsem_lock" 639 }; 640 641 /* 642 * Lock torture writer kthread. Repeatedly acquires and releases 643 * the lock, checking for duplicate acquisitions. 644 */ 645 static int lock_torture_writer(void *arg) 646 { 647 struct lock_stress_stats *lwsp = arg; 648 static DEFINE_TORTURE_RANDOM(rand); 649 650 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 651 set_user_nice(current, MAX_NICE); 652 653 do { 654 if ((torture_random(&rand) & 0xfffff) == 0) 655 schedule_timeout_uninterruptible(1); 656 657 cxt.cur_ops->task_boost(&rand); 658 cxt.cur_ops->writelock(); 659 if (WARN_ON_ONCE(lock_is_write_held)) 660 lwsp->n_lock_fail++; 661 lock_is_write_held = 1; 662 if (WARN_ON_ONCE(lock_is_read_held)) 663 lwsp->n_lock_fail++; /* rare, but... */ 664 665 lwsp->n_lock_acquired++; 666 cxt.cur_ops->write_delay(&rand); 667 lock_is_write_held = 0; 668 cxt.cur_ops->writeunlock(); 669 670 stutter_wait("lock_torture_writer"); 671 } while (!torture_must_stop()); 672 673 cxt.cur_ops->task_boost(NULL); /* reset prio */ 674 torture_kthread_stopping("lock_torture_writer"); 675 return 0; 676 } 677 678 /* 679 * Lock torture reader kthread. Repeatedly acquires and releases 680 * the reader lock. 681 */ 682 static int lock_torture_reader(void *arg) 683 { 684 struct lock_stress_stats *lrsp = arg; 685 static DEFINE_TORTURE_RANDOM(rand); 686 687 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 688 set_user_nice(current, MAX_NICE); 689 690 do { 691 if ((torture_random(&rand) & 0xfffff) == 0) 692 schedule_timeout_uninterruptible(1); 693 694 cxt.cur_ops->readlock(); 695 lock_is_read_held = 1; 696 if (WARN_ON_ONCE(lock_is_write_held)) 697 lrsp->n_lock_fail++; /* rare, but... */ 698 699 lrsp->n_lock_acquired++; 700 cxt.cur_ops->read_delay(&rand); 701 lock_is_read_held = 0; 702 cxt.cur_ops->readunlock(); 703 704 stutter_wait("lock_torture_reader"); 705 } while (!torture_must_stop()); 706 torture_kthread_stopping("lock_torture_reader"); 707 return 0; 708 } 709 710 /* 711 * Create an lock-torture-statistics message in the specified buffer. 712 */ 713 static void __torture_print_stats(char *page, 714 struct lock_stress_stats *statp, bool write) 715 { 716 bool fail = 0; 717 int i, n_stress; 718 long max = 0; 719 long min = statp[0].n_lock_acquired; 720 long long sum = 0; 721 722 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 723 for (i = 0; i < n_stress; i++) { 724 if (statp[i].n_lock_fail) 725 fail = true; 726 sum += statp[i].n_lock_acquired; 727 if (max < statp[i].n_lock_fail) 728 max = statp[i].n_lock_fail; 729 if (min > statp[i].n_lock_fail) 730 min = statp[i].n_lock_fail; 731 } 732 page += sprintf(page, 733 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 734 write ? "Writes" : "Reads ", 735 sum, max, min, max / 2 > min ? "???" : "", 736 fail, fail ? "!!!" : ""); 737 if (fail) 738 atomic_inc(&cxt.n_lock_torture_errors); 739 } 740 741 /* 742 * Print torture statistics. Caller must ensure that there is only one 743 * call to this function at a given time!!! This is normally accomplished 744 * by relying on the module system to only have one copy of the module 745 * loaded, and then by giving the lock_torture_stats kthread full control 746 * (or the init/cleanup functions when lock_torture_stats thread is not 747 * running). 748 */ 749 static void lock_torture_stats_print(void) 750 { 751 int size = cxt.nrealwriters_stress * 200 + 8192; 752 char *buf; 753 754 if (cxt.cur_ops->readlock) 755 size += cxt.nrealreaders_stress * 200 + 8192; 756 757 buf = kmalloc(size, GFP_KERNEL); 758 if (!buf) { 759 pr_err("lock_torture_stats_print: Out of memory, need: %d", 760 size); 761 return; 762 } 763 764 __torture_print_stats(buf, cxt.lwsa, true); 765 pr_alert("%s", buf); 766 kfree(buf); 767 768 if (cxt.cur_ops->readlock) { 769 buf = kmalloc(size, GFP_KERNEL); 770 if (!buf) { 771 pr_err("lock_torture_stats_print: Out of memory, need: %d", 772 size); 773 return; 774 } 775 776 __torture_print_stats(buf, cxt.lrsa, false); 777 pr_alert("%s", buf); 778 kfree(buf); 779 } 780 } 781 782 /* 783 * Periodically prints torture statistics, if periodic statistics printing 784 * was specified via the stat_interval module parameter. 785 * 786 * No need to worry about fullstop here, since this one doesn't reference 787 * volatile state or register callbacks. 788 */ 789 static int lock_torture_stats(void *arg) 790 { 791 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 792 do { 793 schedule_timeout_interruptible(stat_interval * HZ); 794 lock_torture_stats_print(); 795 torture_shutdown_absorb("lock_torture_stats"); 796 } while (!torture_must_stop()); 797 torture_kthread_stopping("lock_torture_stats"); 798 return 0; 799 } 800 801 static inline void 802 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 803 const char *tag) 804 { 805 pr_alert("%s" TORTURE_FLAG 806 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 807 torture_type, tag, cxt.debug_lock ? " [debug]": "", 808 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 809 verbose, shuffle_interval, stutter, shutdown_secs, 810 onoff_interval, onoff_holdoff); 811 } 812 813 static void lock_torture_cleanup(void) 814 { 815 int i; 816 817 if (torture_cleanup_begin()) 818 return; 819 820 /* 821 * Indicates early cleanup, meaning that the test has not run, 822 * such as when passing bogus args when loading the module. As 823 * such, only perform the underlying torture-specific cleanups, 824 * and avoid anything related to locktorture. 825 */ 826 if (!cxt.lwsa) 827 goto end; 828 829 if (writer_tasks) { 830 for (i = 0; i < cxt.nrealwriters_stress; i++) 831 torture_stop_kthread(lock_torture_writer, 832 writer_tasks[i]); 833 kfree(writer_tasks); 834 writer_tasks = NULL; 835 } 836 837 if (reader_tasks) { 838 for (i = 0; i < cxt.nrealreaders_stress; i++) 839 torture_stop_kthread(lock_torture_reader, 840 reader_tasks[i]); 841 kfree(reader_tasks); 842 reader_tasks = NULL; 843 } 844 845 torture_stop_kthread(lock_torture_stats, stats_task); 846 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 847 848 if (atomic_read(&cxt.n_lock_torture_errors)) 849 lock_torture_print_module_parms(cxt.cur_ops, 850 "End of test: FAILURE"); 851 else if (torture_onoff_failures()) 852 lock_torture_print_module_parms(cxt.cur_ops, 853 "End of test: LOCK_HOTPLUG"); 854 else 855 lock_torture_print_module_parms(cxt.cur_ops, 856 "End of test: SUCCESS"); 857 858 kfree(cxt.lwsa); 859 kfree(cxt.lrsa); 860 861 end: 862 torture_cleanup_end(); 863 } 864 865 static int __init lock_torture_init(void) 866 { 867 int i, j; 868 int firsterr = 0; 869 static struct lock_torture_ops *torture_ops[] = { 870 &lock_busted_ops, 871 &spin_lock_ops, &spin_lock_irq_ops, 872 &rw_lock_ops, &rw_lock_irq_ops, 873 &mutex_lock_ops, 874 &ww_mutex_lock_ops, 875 #ifdef CONFIG_RT_MUTEXES 876 &rtmutex_lock_ops, 877 #endif 878 &rwsem_lock_ops, 879 &percpu_rwsem_lock_ops, 880 }; 881 882 if (!torture_init_begin(torture_type, verbose, &torture_runnable)) 883 return -EBUSY; 884 885 /* Process args and tell the world that the torturer is on the job. */ 886 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 887 cxt.cur_ops = torture_ops[i]; 888 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 889 break; 890 } 891 if (i == ARRAY_SIZE(torture_ops)) { 892 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 893 torture_type); 894 pr_alert("lock-torture types:"); 895 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 896 pr_alert(" %s", torture_ops[i]->name); 897 pr_alert("\n"); 898 firsterr = -EINVAL; 899 goto unwind; 900 } 901 if (cxt.cur_ops->init) 902 cxt.cur_ops->init(); 903 904 if (nwriters_stress >= 0) 905 cxt.nrealwriters_stress = nwriters_stress; 906 else 907 cxt.nrealwriters_stress = 2 * num_online_cpus(); 908 909 #ifdef CONFIG_DEBUG_MUTEXES 910 if (strncmp(torture_type, "mutex", 5) == 0) 911 cxt.debug_lock = true; 912 #endif 913 #ifdef CONFIG_DEBUG_RT_MUTEXES 914 if (strncmp(torture_type, "rtmutex", 7) == 0) 915 cxt.debug_lock = true; 916 #endif 917 #ifdef CONFIG_DEBUG_SPINLOCK 918 if ((strncmp(torture_type, "spin", 4) == 0) || 919 (strncmp(torture_type, "rw_lock", 7) == 0)) 920 cxt.debug_lock = true; 921 #endif 922 923 /* Initialize the statistics so that each run gets its own numbers. */ 924 925 lock_is_write_held = 0; 926 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); 927 if (cxt.lwsa == NULL) { 928 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 929 firsterr = -ENOMEM; 930 goto unwind; 931 } 932 for (i = 0; i < cxt.nrealwriters_stress; i++) { 933 cxt.lwsa[i].n_lock_fail = 0; 934 cxt.lwsa[i].n_lock_acquired = 0; 935 } 936 937 if (cxt.cur_ops->readlock) { 938 if (nreaders_stress >= 0) 939 cxt.nrealreaders_stress = nreaders_stress; 940 else { 941 /* 942 * By default distribute evenly the number of 943 * readers and writers. We still run the same number 944 * of threads as the writer-only locks default. 945 */ 946 if (nwriters_stress < 0) /* user doesn't care */ 947 cxt.nrealwriters_stress = num_online_cpus(); 948 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 949 } 950 951 lock_is_read_held = 0; 952 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); 953 if (cxt.lrsa == NULL) { 954 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 955 firsterr = -ENOMEM; 956 kfree(cxt.lwsa); 957 cxt.lwsa = NULL; 958 goto unwind; 959 } 960 961 for (i = 0; i < cxt.nrealreaders_stress; i++) { 962 cxt.lrsa[i].n_lock_fail = 0; 963 cxt.lrsa[i].n_lock_acquired = 0; 964 } 965 } 966 967 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 968 969 /* Prepare torture context. */ 970 if (onoff_interval > 0) { 971 firsterr = torture_onoff_init(onoff_holdoff * HZ, 972 onoff_interval * HZ); 973 if (firsterr) 974 goto unwind; 975 } 976 if (shuffle_interval > 0) { 977 firsterr = torture_shuffle_init(shuffle_interval); 978 if (firsterr) 979 goto unwind; 980 } 981 if (shutdown_secs > 0) { 982 firsterr = torture_shutdown_init(shutdown_secs, 983 lock_torture_cleanup); 984 if (firsterr) 985 goto unwind; 986 } 987 if (stutter > 0) { 988 firsterr = torture_stutter_init(stutter); 989 if (firsterr) 990 goto unwind; 991 } 992 993 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), 994 GFP_KERNEL); 995 if (writer_tasks == NULL) { 996 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 997 firsterr = -ENOMEM; 998 goto unwind; 999 } 1000 1001 if (cxt.cur_ops->readlock) { 1002 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]), 1003 GFP_KERNEL); 1004 if (reader_tasks == NULL) { 1005 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 1006 kfree(writer_tasks); 1007 writer_tasks = NULL; 1008 firsterr = -ENOMEM; 1009 goto unwind; 1010 } 1011 } 1012 1013 /* 1014 * Create the kthreads and start torturing (oh, those poor little locks). 1015 * 1016 * TODO: Note that we interleave writers with readers, giving writers a 1017 * slight advantage, by creating its kthread first. This can be modified 1018 * for very specific needs, or even let the user choose the policy, if 1019 * ever wanted. 1020 */ 1021 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 1022 j < cxt.nrealreaders_stress; i++, j++) { 1023 if (i >= cxt.nrealwriters_stress) 1024 goto create_reader; 1025 1026 /* Create writer. */ 1027 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 1028 writer_tasks[i]); 1029 if (firsterr) 1030 goto unwind; 1031 1032 create_reader: 1033 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 1034 continue; 1035 /* Create reader. */ 1036 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 1037 reader_tasks[j]); 1038 if (firsterr) 1039 goto unwind; 1040 } 1041 if (stat_interval > 0) { 1042 firsterr = torture_create_kthread(lock_torture_stats, NULL, 1043 stats_task); 1044 if (firsterr) 1045 goto unwind; 1046 } 1047 torture_init_end(); 1048 return 0; 1049 1050 unwind: 1051 torture_init_end(); 1052 lock_torture_cleanup(); 1053 return firsterr; 1054 } 1055 1056 module_init(lock_torture_init); 1057 module_exit(lock_torture_cleanup); 1058