1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/kthread.h> 27 #include <linux/sched/rt.h> 28 #include <linux/spinlock.h> 29 #include <linux/rwlock.h> 30 #include <linux/mutex.h> 31 #include <linux/rwsem.h> 32 #include <linux/smp.h> 33 #include <linux/interrupt.h> 34 #include <linux/sched.h> 35 #include <uapi/linux/sched/types.h> 36 #include <linux/rtmutex.h> 37 #include <linux/atomic.h> 38 #include <linux/moduleparam.h> 39 #include <linux/delay.h> 40 #include <linux/slab.h> 41 #include <linux/percpu-rwsem.h> 42 #include <linux/torture.h> 43 44 MODULE_LICENSE("GPL"); 45 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 46 47 torture_param(int, nwriters_stress, -1, 48 "Number of write-locking stress-test threads"); 49 torture_param(int, nreaders_stress, -1, 50 "Number of read-locking stress-test threads"); 51 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 52 torture_param(int, onoff_interval, 0, 53 "Time between CPU hotplugs (s), 0=disable"); 54 torture_param(int, shuffle_interval, 3, 55 "Number of jiffies between shuffles, 0=disable"); 56 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 57 torture_param(int, stat_interval, 60, 58 "Number of seconds between stats printk()s"); 59 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 60 torture_param(bool, verbose, true, 61 "Enable verbose debugging printk()s"); 62 63 static char *torture_type = "spin_lock"; 64 module_param(torture_type, charp, 0444); 65 MODULE_PARM_DESC(torture_type, 66 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 67 68 static struct task_struct *stats_task; 69 static struct task_struct **writer_tasks; 70 static struct task_struct **reader_tasks; 71 72 static bool lock_is_write_held; 73 static bool lock_is_read_held; 74 75 struct lock_stress_stats { 76 long n_lock_fail; 77 long n_lock_acquired; 78 }; 79 80 /* Forward reference. */ 81 static void lock_torture_cleanup(void); 82 83 /* 84 * Operations vector for selecting different types of tests. 85 */ 86 struct lock_torture_ops { 87 void (*init)(void); 88 int (*writelock)(void); 89 void (*write_delay)(struct torture_random_state *trsp); 90 void (*task_boost)(struct torture_random_state *trsp); 91 void (*writeunlock)(void); 92 int (*readlock)(void); 93 void (*read_delay)(struct torture_random_state *trsp); 94 void (*readunlock)(void); 95 96 unsigned long flags; /* for irq spinlocks */ 97 const char *name; 98 }; 99 100 struct lock_torture_cxt { 101 int nrealwriters_stress; 102 int nrealreaders_stress; 103 bool debug_lock; 104 atomic_t n_lock_torture_errors; 105 struct lock_torture_ops *cur_ops; 106 struct lock_stress_stats *lwsa; /* writer statistics */ 107 struct lock_stress_stats *lrsa; /* reader statistics */ 108 }; 109 static struct lock_torture_cxt cxt = { 0, 0, false, 110 ATOMIC_INIT(0), 111 NULL, NULL}; 112 /* 113 * Definitions for lock torture testing. 114 */ 115 116 static int torture_lock_busted_write_lock(void) 117 { 118 return 0; /* BUGGY, do not use in real life!!! */ 119 } 120 121 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 122 { 123 const unsigned long longdelay_ms = 100; 124 125 /* We want a long delay occasionally to force massive contention. */ 126 if (!(torture_random(trsp) % 127 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 128 mdelay(longdelay_ms); 129 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 130 torture_preempt_schedule(); /* Allow test to be preempted. */ 131 } 132 133 static void torture_lock_busted_write_unlock(void) 134 { 135 /* BUGGY, do not use in real life!!! */ 136 } 137 138 static void torture_boost_dummy(struct torture_random_state *trsp) 139 { 140 /* Only rtmutexes care about priority */ 141 } 142 143 static struct lock_torture_ops lock_busted_ops = { 144 .writelock = torture_lock_busted_write_lock, 145 .write_delay = torture_lock_busted_write_delay, 146 .task_boost = torture_boost_dummy, 147 .writeunlock = torture_lock_busted_write_unlock, 148 .readlock = NULL, 149 .read_delay = NULL, 150 .readunlock = NULL, 151 .name = "lock_busted" 152 }; 153 154 static DEFINE_SPINLOCK(torture_spinlock); 155 156 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 157 { 158 spin_lock(&torture_spinlock); 159 return 0; 160 } 161 162 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 163 { 164 const unsigned long shortdelay_us = 2; 165 const unsigned long longdelay_ms = 100; 166 167 /* We want a short delay mostly to emulate likely code, and 168 * we want a long delay occasionally to force massive contention. 169 */ 170 if (!(torture_random(trsp) % 171 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 172 mdelay(longdelay_ms); 173 if (!(torture_random(trsp) % 174 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 175 udelay(shortdelay_us); 176 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 177 torture_preempt_schedule(); /* Allow test to be preempted. */ 178 } 179 180 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 181 { 182 spin_unlock(&torture_spinlock); 183 } 184 185 static struct lock_torture_ops spin_lock_ops = { 186 .writelock = torture_spin_lock_write_lock, 187 .write_delay = torture_spin_lock_write_delay, 188 .task_boost = torture_boost_dummy, 189 .writeunlock = torture_spin_lock_write_unlock, 190 .readlock = NULL, 191 .read_delay = NULL, 192 .readunlock = NULL, 193 .name = "spin_lock" 194 }; 195 196 static int torture_spin_lock_write_lock_irq(void) 197 __acquires(torture_spinlock) 198 { 199 unsigned long flags; 200 201 spin_lock_irqsave(&torture_spinlock, flags); 202 cxt.cur_ops->flags = flags; 203 return 0; 204 } 205 206 static void torture_lock_spin_write_unlock_irq(void) 207 __releases(torture_spinlock) 208 { 209 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 210 } 211 212 static struct lock_torture_ops spin_lock_irq_ops = { 213 .writelock = torture_spin_lock_write_lock_irq, 214 .write_delay = torture_spin_lock_write_delay, 215 .task_boost = torture_boost_dummy, 216 .writeunlock = torture_lock_spin_write_unlock_irq, 217 .readlock = NULL, 218 .read_delay = NULL, 219 .readunlock = NULL, 220 .name = "spin_lock_irq" 221 }; 222 223 static DEFINE_RWLOCK(torture_rwlock); 224 225 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 226 { 227 write_lock(&torture_rwlock); 228 return 0; 229 } 230 231 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 232 { 233 const unsigned long shortdelay_us = 2; 234 const unsigned long longdelay_ms = 100; 235 236 /* We want a short delay mostly to emulate likely code, and 237 * we want a long delay occasionally to force massive contention. 238 */ 239 if (!(torture_random(trsp) % 240 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 241 mdelay(longdelay_ms); 242 else 243 udelay(shortdelay_us); 244 } 245 246 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 247 { 248 write_unlock(&torture_rwlock); 249 } 250 251 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 252 { 253 read_lock(&torture_rwlock); 254 return 0; 255 } 256 257 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 258 { 259 const unsigned long shortdelay_us = 10; 260 const unsigned long longdelay_ms = 100; 261 262 /* We want a short delay mostly to emulate likely code, and 263 * we want a long delay occasionally to force massive contention. 264 */ 265 if (!(torture_random(trsp) % 266 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 267 mdelay(longdelay_ms); 268 else 269 udelay(shortdelay_us); 270 } 271 272 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 273 { 274 read_unlock(&torture_rwlock); 275 } 276 277 static struct lock_torture_ops rw_lock_ops = { 278 .writelock = torture_rwlock_write_lock, 279 .write_delay = torture_rwlock_write_delay, 280 .task_boost = torture_boost_dummy, 281 .writeunlock = torture_rwlock_write_unlock, 282 .readlock = torture_rwlock_read_lock, 283 .read_delay = torture_rwlock_read_delay, 284 .readunlock = torture_rwlock_read_unlock, 285 .name = "rw_lock" 286 }; 287 288 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 289 { 290 unsigned long flags; 291 292 write_lock_irqsave(&torture_rwlock, flags); 293 cxt.cur_ops->flags = flags; 294 return 0; 295 } 296 297 static void torture_rwlock_write_unlock_irq(void) 298 __releases(torture_rwlock) 299 { 300 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 301 } 302 303 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 304 { 305 unsigned long flags; 306 307 read_lock_irqsave(&torture_rwlock, flags); 308 cxt.cur_ops->flags = flags; 309 return 0; 310 } 311 312 static void torture_rwlock_read_unlock_irq(void) 313 __releases(torture_rwlock) 314 { 315 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 316 } 317 318 static struct lock_torture_ops rw_lock_irq_ops = { 319 .writelock = torture_rwlock_write_lock_irq, 320 .write_delay = torture_rwlock_write_delay, 321 .task_boost = torture_boost_dummy, 322 .writeunlock = torture_rwlock_write_unlock_irq, 323 .readlock = torture_rwlock_read_lock_irq, 324 .read_delay = torture_rwlock_read_delay, 325 .readunlock = torture_rwlock_read_unlock_irq, 326 .name = "rw_lock_irq" 327 }; 328 329 static DEFINE_MUTEX(torture_mutex); 330 331 static int torture_mutex_lock(void) __acquires(torture_mutex) 332 { 333 mutex_lock(&torture_mutex); 334 return 0; 335 } 336 337 static void torture_mutex_delay(struct torture_random_state *trsp) 338 { 339 const unsigned long longdelay_ms = 100; 340 341 /* We want a long delay occasionally to force massive contention. */ 342 if (!(torture_random(trsp) % 343 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 344 mdelay(longdelay_ms * 5); 345 else 346 mdelay(longdelay_ms / 5); 347 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 348 torture_preempt_schedule(); /* Allow test to be preempted. */ 349 } 350 351 static void torture_mutex_unlock(void) __releases(torture_mutex) 352 { 353 mutex_unlock(&torture_mutex); 354 } 355 356 static struct lock_torture_ops mutex_lock_ops = { 357 .writelock = torture_mutex_lock, 358 .write_delay = torture_mutex_delay, 359 .task_boost = torture_boost_dummy, 360 .writeunlock = torture_mutex_unlock, 361 .readlock = NULL, 362 .read_delay = NULL, 363 .readunlock = NULL, 364 .name = "mutex_lock" 365 }; 366 367 #include <linux/ww_mutex.h> 368 static DEFINE_WW_CLASS(torture_ww_class); 369 static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); 370 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); 371 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); 372 373 static int torture_ww_mutex_lock(void) 374 __acquires(torture_ww_mutex_0) 375 __acquires(torture_ww_mutex_1) 376 __acquires(torture_ww_mutex_2) 377 { 378 LIST_HEAD(list); 379 struct reorder_lock { 380 struct list_head link; 381 struct ww_mutex *lock; 382 } locks[3], *ll, *ln; 383 struct ww_acquire_ctx ctx; 384 385 locks[0].lock = &torture_ww_mutex_0; 386 list_add(&locks[0].link, &list); 387 388 locks[1].lock = &torture_ww_mutex_1; 389 list_add(&locks[1].link, &list); 390 391 locks[2].lock = &torture_ww_mutex_2; 392 list_add(&locks[2].link, &list); 393 394 ww_acquire_init(&ctx, &torture_ww_class); 395 396 list_for_each_entry(ll, &list, link) { 397 int err; 398 399 err = ww_mutex_lock(ll->lock, &ctx); 400 if (!err) 401 continue; 402 403 ln = ll; 404 list_for_each_entry_continue_reverse(ln, &list, link) 405 ww_mutex_unlock(ln->lock); 406 407 if (err != -EDEADLK) 408 return err; 409 410 ww_mutex_lock_slow(ll->lock, &ctx); 411 list_move(&ll->link, &list); 412 } 413 414 ww_acquire_fini(&ctx); 415 return 0; 416 } 417 418 static void torture_ww_mutex_unlock(void) 419 __releases(torture_ww_mutex_0) 420 __releases(torture_ww_mutex_1) 421 __releases(torture_ww_mutex_2) 422 { 423 ww_mutex_unlock(&torture_ww_mutex_0); 424 ww_mutex_unlock(&torture_ww_mutex_1); 425 ww_mutex_unlock(&torture_ww_mutex_2); 426 } 427 428 static struct lock_torture_ops ww_mutex_lock_ops = { 429 .writelock = torture_ww_mutex_lock, 430 .write_delay = torture_mutex_delay, 431 .task_boost = torture_boost_dummy, 432 .writeunlock = torture_ww_mutex_unlock, 433 .readlock = NULL, 434 .read_delay = NULL, 435 .readunlock = NULL, 436 .name = "ww_mutex_lock" 437 }; 438 439 #ifdef CONFIG_RT_MUTEXES 440 static DEFINE_RT_MUTEX(torture_rtmutex); 441 442 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 443 { 444 rt_mutex_lock(&torture_rtmutex); 445 return 0; 446 } 447 448 static void torture_rtmutex_boost(struct torture_random_state *trsp) 449 { 450 int policy; 451 struct sched_param param; 452 const unsigned int factor = 50000; /* yes, quite arbitrary */ 453 454 if (!rt_task(current)) { 455 /* 456 * Boost priority once every ~50k operations. When the 457 * task tries to take the lock, the rtmutex it will account 458 * for the new priority, and do any corresponding pi-dance. 459 */ 460 if (trsp && !(torture_random(trsp) % 461 (cxt.nrealwriters_stress * factor))) { 462 policy = SCHED_FIFO; 463 param.sched_priority = MAX_RT_PRIO - 1; 464 } else /* common case, do nothing */ 465 return; 466 } else { 467 /* 468 * The task will remain boosted for another ~500k operations, 469 * then restored back to its original prio, and so forth. 470 * 471 * When @trsp is nil, we want to force-reset the task for 472 * stopping the kthread. 473 */ 474 if (!trsp || !(torture_random(trsp) % 475 (cxt.nrealwriters_stress * factor * 2))) { 476 policy = SCHED_NORMAL; 477 param.sched_priority = 0; 478 } else /* common case, do nothing */ 479 return; 480 } 481 482 sched_setscheduler_nocheck(current, policy, ¶m); 483 } 484 485 static void torture_rtmutex_delay(struct torture_random_state *trsp) 486 { 487 const unsigned long shortdelay_us = 2; 488 const unsigned long longdelay_ms = 100; 489 490 /* 491 * We want a short delay mostly to emulate likely code, and 492 * we want a long delay occasionally to force massive contention. 493 */ 494 if (!(torture_random(trsp) % 495 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 496 mdelay(longdelay_ms); 497 if (!(torture_random(trsp) % 498 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 499 udelay(shortdelay_us); 500 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 501 torture_preempt_schedule(); /* Allow test to be preempted. */ 502 } 503 504 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 505 { 506 rt_mutex_unlock(&torture_rtmutex); 507 } 508 509 static struct lock_torture_ops rtmutex_lock_ops = { 510 .writelock = torture_rtmutex_lock, 511 .write_delay = torture_rtmutex_delay, 512 .task_boost = torture_rtmutex_boost, 513 .writeunlock = torture_rtmutex_unlock, 514 .readlock = NULL, 515 .read_delay = NULL, 516 .readunlock = NULL, 517 .name = "rtmutex_lock" 518 }; 519 #endif 520 521 static DECLARE_RWSEM(torture_rwsem); 522 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 523 { 524 down_write(&torture_rwsem); 525 return 0; 526 } 527 528 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 529 { 530 const unsigned long longdelay_ms = 100; 531 532 /* We want a long delay occasionally to force massive contention. */ 533 if (!(torture_random(trsp) % 534 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 535 mdelay(longdelay_ms * 10); 536 else 537 mdelay(longdelay_ms / 10); 538 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 539 torture_preempt_schedule(); /* Allow test to be preempted. */ 540 } 541 542 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 543 { 544 up_write(&torture_rwsem); 545 } 546 547 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 548 { 549 down_read(&torture_rwsem); 550 return 0; 551 } 552 553 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 554 { 555 const unsigned long longdelay_ms = 100; 556 557 /* We want a long delay occasionally to force massive contention. */ 558 if (!(torture_random(trsp) % 559 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 560 mdelay(longdelay_ms * 2); 561 else 562 mdelay(longdelay_ms / 2); 563 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 564 torture_preempt_schedule(); /* Allow test to be preempted. */ 565 } 566 567 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 568 { 569 up_read(&torture_rwsem); 570 } 571 572 static struct lock_torture_ops rwsem_lock_ops = { 573 .writelock = torture_rwsem_down_write, 574 .write_delay = torture_rwsem_write_delay, 575 .task_boost = torture_boost_dummy, 576 .writeunlock = torture_rwsem_up_write, 577 .readlock = torture_rwsem_down_read, 578 .read_delay = torture_rwsem_read_delay, 579 .readunlock = torture_rwsem_up_read, 580 .name = "rwsem_lock" 581 }; 582 583 #include <linux/percpu-rwsem.h> 584 static struct percpu_rw_semaphore pcpu_rwsem; 585 586 void torture_percpu_rwsem_init(void) 587 { 588 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 589 } 590 591 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 592 { 593 percpu_down_write(&pcpu_rwsem); 594 return 0; 595 } 596 597 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 598 { 599 percpu_up_write(&pcpu_rwsem); 600 } 601 602 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 603 { 604 percpu_down_read(&pcpu_rwsem); 605 return 0; 606 } 607 608 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 609 { 610 percpu_up_read(&pcpu_rwsem); 611 } 612 613 static struct lock_torture_ops percpu_rwsem_lock_ops = { 614 .init = torture_percpu_rwsem_init, 615 .writelock = torture_percpu_rwsem_down_write, 616 .write_delay = torture_rwsem_write_delay, 617 .task_boost = torture_boost_dummy, 618 .writeunlock = torture_percpu_rwsem_up_write, 619 .readlock = torture_percpu_rwsem_down_read, 620 .read_delay = torture_rwsem_read_delay, 621 .readunlock = torture_percpu_rwsem_up_read, 622 .name = "percpu_rwsem_lock" 623 }; 624 625 /* 626 * Lock torture writer kthread. Repeatedly acquires and releases 627 * the lock, checking for duplicate acquisitions. 628 */ 629 static int lock_torture_writer(void *arg) 630 { 631 struct lock_stress_stats *lwsp = arg; 632 static DEFINE_TORTURE_RANDOM(rand); 633 634 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 635 set_user_nice(current, MAX_NICE); 636 637 do { 638 if ((torture_random(&rand) & 0xfffff) == 0) 639 schedule_timeout_uninterruptible(1); 640 641 cxt.cur_ops->task_boost(&rand); 642 cxt.cur_ops->writelock(); 643 if (WARN_ON_ONCE(lock_is_write_held)) 644 lwsp->n_lock_fail++; 645 lock_is_write_held = 1; 646 if (WARN_ON_ONCE(lock_is_read_held)) 647 lwsp->n_lock_fail++; /* rare, but... */ 648 649 lwsp->n_lock_acquired++; 650 cxt.cur_ops->write_delay(&rand); 651 lock_is_write_held = 0; 652 cxt.cur_ops->writeunlock(); 653 654 stutter_wait("lock_torture_writer"); 655 } while (!torture_must_stop()); 656 657 cxt.cur_ops->task_boost(NULL); /* reset prio */ 658 torture_kthread_stopping("lock_torture_writer"); 659 return 0; 660 } 661 662 /* 663 * Lock torture reader kthread. Repeatedly acquires and releases 664 * the reader lock. 665 */ 666 static int lock_torture_reader(void *arg) 667 { 668 struct lock_stress_stats *lrsp = arg; 669 static DEFINE_TORTURE_RANDOM(rand); 670 671 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 672 set_user_nice(current, MAX_NICE); 673 674 do { 675 if ((torture_random(&rand) & 0xfffff) == 0) 676 schedule_timeout_uninterruptible(1); 677 678 cxt.cur_ops->readlock(); 679 lock_is_read_held = 1; 680 if (WARN_ON_ONCE(lock_is_write_held)) 681 lrsp->n_lock_fail++; /* rare, but... */ 682 683 lrsp->n_lock_acquired++; 684 cxt.cur_ops->read_delay(&rand); 685 lock_is_read_held = 0; 686 cxt.cur_ops->readunlock(); 687 688 stutter_wait("lock_torture_reader"); 689 } while (!torture_must_stop()); 690 torture_kthread_stopping("lock_torture_reader"); 691 return 0; 692 } 693 694 /* 695 * Create an lock-torture-statistics message in the specified buffer. 696 */ 697 static void __torture_print_stats(char *page, 698 struct lock_stress_stats *statp, bool write) 699 { 700 bool fail = 0; 701 int i, n_stress; 702 long max = 0, min = statp ? statp[0].n_lock_acquired : 0; 703 long long sum = 0; 704 705 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 706 for (i = 0; i < n_stress; i++) { 707 if (statp[i].n_lock_fail) 708 fail = true; 709 sum += statp[i].n_lock_acquired; 710 if (max < statp[i].n_lock_fail) 711 max = statp[i].n_lock_fail; 712 if (min > statp[i].n_lock_fail) 713 min = statp[i].n_lock_fail; 714 } 715 page += sprintf(page, 716 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 717 write ? "Writes" : "Reads ", 718 sum, max, min, max / 2 > min ? "???" : "", 719 fail, fail ? "!!!" : ""); 720 if (fail) 721 atomic_inc(&cxt.n_lock_torture_errors); 722 } 723 724 /* 725 * Print torture statistics. Caller must ensure that there is only one 726 * call to this function at a given time!!! This is normally accomplished 727 * by relying on the module system to only have one copy of the module 728 * loaded, and then by giving the lock_torture_stats kthread full control 729 * (or the init/cleanup functions when lock_torture_stats thread is not 730 * running). 731 */ 732 static void lock_torture_stats_print(void) 733 { 734 int size = cxt.nrealwriters_stress * 200 + 8192; 735 char *buf; 736 737 if (cxt.cur_ops->readlock) 738 size += cxt.nrealreaders_stress * 200 + 8192; 739 740 buf = kmalloc(size, GFP_KERNEL); 741 if (!buf) { 742 pr_err("lock_torture_stats_print: Out of memory, need: %d", 743 size); 744 return; 745 } 746 747 __torture_print_stats(buf, cxt.lwsa, true); 748 pr_alert("%s", buf); 749 kfree(buf); 750 751 if (cxt.cur_ops->readlock) { 752 buf = kmalloc(size, GFP_KERNEL); 753 if (!buf) { 754 pr_err("lock_torture_stats_print: Out of memory, need: %d", 755 size); 756 return; 757 } 758 759 __torture_print_stats(buf, cxt.lrsa, false); 760 pr_alert("%s", buf); 761 kfree(buf); 762 } 763 } 764 765 /* 766 * Periodically prints torture statistics, if periodic statistics printing 767 * was specified via the stat_interval module parameter. 768 * 769 * No need to worry about fullstop here, since this one doesn't reference 770 * volatile state or register callbacks. 771 */ 772 static int lock_torture_stats(void *arg) 773 { 774 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 775 do { 776 schedule_timeout_interruptible(stat_interval * HZ); 777 lock_torture_stats_print(); 778 torture_shutdown_absorb("lock_torture_stats"); 779 } while (!torture_must_stop()); 780 torture_kthread_stopping("lock_torture_stats"); 781 return 0; 782 } 783 784 static inline void 785 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 786 const char *tag) 787 { 788 pr_alert("%s" TORTURE_FLAG 789 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 790 torture_type, tag, cxt.debug_lock ? " [debug]": "", 791 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 792 verbose, shuffle_interval, stutter, shutdown_secs, 793 onoff_interval, onoff_holdoff); 794 } 795 796 static void lock_torture_cleanup(void) 797 { 798 int i; 799 800 if (torture_cleanup_begin()) 801 return; 802 803 /* 804 * Indicates early cleanup, meaning that the test has not run, 805 * such as when passing bogus args when loading the module. As 806 * such, only perform the underlying torture-specific cleanups, 807 * and avoid anything related to locktorture. 808 */ 809 if (!cxt.lwsa && !cxt.lrsa) 810 goto end; 811 812 if (writer_tasks) { 813 for (i = 0; i < cxt.nrealwriters_stress; i++) 814 torture_stop_kthread(lock_torture_writer, 815 writer_tasks[i]); 816 kfree(writer_tasks); 817 writer_tasks = NULL; 818 } 819 820 if (reader_tasks) { 821 for (i = 0; i < cxt.nrealreaders_stress; i++) 822 torture_stop_kthread(lock_torture_reader, 823 reader_tasks[i]); 824 kfree(reader_tasks); 825 reader_tasks = NULL; 826 } 827 828 torture_stop_kthread(lock_torture_stats, stats_task); 829 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 830 831 if (atomic_read(&cxt.n_lock_torture_errors)) 832 lock_torture_print_module_parms(cxt.cur_ops, 833 "End of test: FAILURE"); 834 else if (torture_onoff_failures()) 835 lock_torture_print_module_parms(cxt.cur_ops, 836 "End of test: LOCK_HOTPLUG"); 837 else 838 lock_torture_print_module_parms(cxt.cur_ops, 839 "End of test: SUCCESS"); 840 841 kfree(cxt.lwsa); 842 kfree(cxt.lrsa); 843 844 end: 845 torture_cleanup_end(); 846 } 847 848 static int __init lock_torture_init(void) 849 { 850 int i, j; 851 int firsterr = 0; 852 static struct lock_torture_ops *torture_ops[] = { 853 &lock_busted_ops, 854 &spin_lock_ops, &spin_lock_irq_ops, 855 &rw_lock_ops, &rw_lock_irq_ops, 856 &mutex_lock_ops, 857 &ww_mutex_lock_ops, 858 #ifdef CONFIG_RT_MUTEXES 859 &rtmutex_lock_ops, 860 #endif 861 &rwsem_lock_ops, 862 &percpu_rwsem_lock_ops, 863 }; 864 865 if (!torture_init_begin(torture_type, verbose)) 866 return -EBUSY; 867 868 /* Process args and tell the world that the torturer is on the job. */ 869 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 870 cxt.cur_ops = torture_ops[i]; 871 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 872 break; 873 } 874 if (i == ARRAY_SIZE(torture_ops)) { 875 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 876 torture_type); 877 pr_alert("lock-torture types:"); 878 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 879 pr_alert(" %s", torture_ops[i]->name); 880 pr_alert("\n"); 881 firsterr = -EINVAL; 882 goto unwind; 883 } 884 885 if (nwriters_stress == 0 && nreaders_stress == 0) { 886 pr_alert("lock-torture: must run at least one locking thread\n"); 887 firsterr = -EINVAL; 888 goto unwind; 889 } 890 891 if (cxt.cur_ops->init) 892 cxt.cur_ops->init(); 893 894 if (nwriters_stress >= 0) 895 cxt.nrealwriters_stress = nwriters_stress; 896 else 897 cxt.nrealwriters_stress = 2 * num_online_cpus(); 898 899 #ifdef CONFIG_DEBUG_MUTEXES 900 if (strncmp(torture_type, "mutex", 5) == 0) 901 cxt.debug_lock = true; 902 #endif 903 #ifdef CONFIG_DEBUG_RT_MUTEXES 904 if (strncmp(torture_type, "rtmutex", 7) == 0) 905 cxt.debug_lock = true; 906 #endif 907 #ifdef CONFIG_DEBUG_SPINLOCK 908 if ((strncmp(torture_type, "spin", 4) == 0) || 909 (strncmp(torture_type, "rw_lock", 7) == 0)) 910 cxt.debug_lock = true; 911 #endif 912 913 /* Initialize the statistics so that each run gets its own numbers. */ 914 if (nwriters_stress) { 915 lock_is_write_held = 0; 916 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); 917 if (cxt.lwsa == NULL) { 918 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 919 firsterr = -ENOMEM; 920 goto unwind; 921 } 922 923 for (i = 0; i < cxt.nrealwriters_stress; i++) { 924 cxt.lwsa[i].n_lock_fail = 0; 925 cxt.lwsa[i].n_lock_acquired = 0; 926 } 927 } 928 929 if (cxt.cur_ops->readlock) { 930 if (nreaders_stress >= 0) 931 cxt.nrealreaders_stress = nreaders_stress; 932 else { 933 /* 934 * By default distribute evenly the number of 935 * readers and writers. We still run the same number 936 * of threads as the writer-only locks default. 937 */ 938 if (nwriters_stress < 0) /* user doesn't care */ 939 cxt.nrealwriters_stress = num_online_cpus(); 940 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 941 } 942 943 if (nreaders_stress) { 944 lock_is_read_held = 0; 945 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); 946 if (cxt.lrsa == NULL) { 947 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 948 firsterr = -ENOMEM; 949 kfree(cxt.lwsa); 950 cxt.lwsa = NULL; 951 goto unwind; 952 } 953 954 for (i = 0; i < cxt.nrealreaders_stress; i++) { 955 cxt.lrsa[i].n_lock_fail = 0; 956 cxt.lrsa[i].n_lock_acquired = 0; 957 } 958 } 959 } 960 961 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 962 963 /* Prepare torture context. */ 964 if (onoff_interval > 0) { 965 firsterr = torture_onoff_init(onoff_holdoff * HZ, 966 onoff_interval * HZ); 967 if (firsterr) 968 goto unwind; 969 } 970 if (shuffle_interval > 0) { 971 firsterr = torture_shuffle_init(shuffle_interval); 972 if (firsterr) 973 goto unwind; 974 } 975 if (shutdown_secs > 0) { 976 firsterr = torture_shutdown_init(shutdown_secs, 977 lock_torture_cleanup); 978 if (firsterr) 979 goto unwind; 980 } 981 if (stutter > 0) { 982 firsterr = torture_stutter_init(stutter); 983 if (firsterr) 984 goto unwind; 985 } 986 987 if (nwriters_stress) { 988 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), 989 GFP_KERNEL); 990 if (writer_tasks == NULL) { 991 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 992 firsterr = -ENOMEM; 993 goto unwind; 994 } 995 } 996 997 if (cxt.cur_ops->readlock) { 998 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]), 999 GFP_KERNEL); 1000 if (reader_tasks == NULL) { 1001 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 1002 kfree(writer_tasks); 1003 writer_tasks = NULL; 1004 firsterr = -ENOMEM; 1005 goto unwind; 1006 } 1007 } 1008 1009 /* 1010 * Create the kthreads and start torturing (oh, those poor little locks). 1011 * 1012 * TODO: Note that we interleave writers with readers, giving writers a 1013 * slight advantage, by creating its kthread first. This can be modified 1014 * for very specific needs, or even let the user choose the policy, if 1015 * ever wanted. 1016 */ 1017 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 1018 j < cxt.nrealreaders_stress; i++, j++) { 1019 if (i >= cxt.nrealwriters_stress) 1020 goto create_reader; 1021 1022 /* Create writer. */ 1023 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 1024 writer_tasks[i]); 1025 if (firsterr) 1026 goto unwind; 1027 1028 create_reader: 1029 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 1030 continue; 1031 /* Create reader. */ 1032 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 1033 reader_tasks[j]); 1034 if (firsterr) 1035 goto unwind; 1036 } 1037 if (stat_interval > 0) { 1038 firsterr = torture_create_kthread(lock_torture_stats, NULL, 1039 stats_task); 1040 if (firsterr) 1041 goto unwind; 1042 } 1043 torture_init_end(); 1044 return 0; 1045 1046 unwind: 1047 torture_init_end(); 1048 lock_torture_cleanup(); 1049 return firsterr; 1050 } 1051 1052 module_init(lock_torture_init); 1053 module_exit(lock_torture_cleanup); 1054