1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 25 #define pr_fmt(fmt) fmt 26 27 #include <linux/kernel.h> 28 #include <linux/module.h> 29 #include <linux/kthread.h> 30 #include <linux/sched/rt.h> 31 #include <linux/spinlock.h> 32 #include <linux/rwlock.h> 33 #include <linux/mutex.h> 34 #include <linux/rwsem.h> 35 #include <linux/smp.h> 36 #include <linux/interrupt.h> 37 #include <linux/sched.h> 38 #include <uapi/linux/sched/types.h> 39 #include <linux/rtmutex.h> 40 #include <linux/atomic.h> 41 #include <linux/moduleparam.h> 42 #include <linux/delay.h> 43 #include <linux/slab.h> 44 #include <linux/percpu-rwsem.h> 45 #include <linux/torture.h> 46 47 MODULE_LICENSE("GPL"); 48 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 49 50 torture_param(int, nwriters_stress, -1, 51 "Number of write-locking stress-test threads"); 52 torture_param(int, nreaders_stress, -1, 53 "Number of read-locking stress-test threads"); 54 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 55 torture_param(int, onoff_interval, 0, 56 "Time between CPU hotplugs (s), 0=disable"); 57 torture_param(int, shuffle_interval, 3, 58 "Number of jiffies between shuffles, 0=disable"); 59 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 60 torture_param(int, stat_interval, 60, 61 "Number of seconds between stats printk()s"); 62 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 63 torture_param(int, verbose, 1, 64 "Enable verbose debugging printk()s"); 65 66 static char *torture_type = "spin_lock"; 67 module_param(torture_type, charp, 0444); 68 MODULE_PARM_DESC(torture_type, 69 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 70 71 static struct task_struct *stats_task; 72 static struct task_struct **writer_tasks; 73 static struct task_struct **reader_tasks; 74 75 static bool lock_is_write_held; 76 static bool lock_is_read_held; 77 78 struct lock_stress_stats { 79 long n_lock_fail; 80 long n_lock_acquired; 81 }; 82 83 /* Forward reference. */ 84 static void lock_torture_cleanup(void); 85 86 /* 87 * Operations vector for selecting different types of tests. 88 */ 89 struct lock_torture_ops { 90 void (*init)(void); 91 int (*writelock)(void); 92 void (*write_delay)(struct torture_random_state *trsp); 93 void (*task_boost)(struct torture_random_state *trsp); 94 void (*writeunlock)(void); 95 int (*readlock)(void); 96 void (*read_delay)(struct torture_random_state *trsp); 97 void (*readunlock)(void); 98 99 unsigned long flags; /* for irq spinlocks */ 100 const char *name; 101 }; 102 103 struct lock_torture_cxt { 104 int nrealwriters_stress; 105 int nrealreaders_stress; 106 bool debug_lock; 107 atomic_t n_lock_torture_errors; 108 struct lock_torture_ops *cur_ops; 109 struct lock_stress_stats *lwsa; /* writer statistics */ 110 struct lock_stress_stats *lrsa; /* reader statistics */ 111 }; 112 static struct lock_torture_cxt cxt = { 0, 0, false, 113 ATOMIC_INIT(0), 114 NULL, NULL}; 115 /* 116 * Definitions for lock torture testing. 117 */ 118 119 static int torture_lock_busted_write_lock(void) 120 { 121 return 0; /* BUGGY, do not use in real life!!! */ 122 } 123 124 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 125 { 126 const unsigned long longdelay_ms = 100; 127 128 /* We want a long delay occasionally to force massive contention. */ 129 if (!(torture_random(trsp) % 130 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 131 mdelay(longdelay_ms); 132 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 133 torture_preempt_schedule(); /* Allow test to be preempted. */ 134 } 135 136 static void torture_lock_busted_write_unlock(void) 137 { 138 /* BUGGY, do not use in real life!!! */ 139 } 140 141 static void torture_boost_dummy(struct torture_random_state *trsp) 142 { 143 /* Only rtmutexes care about priority */ 144 } 145 146 static struct lock_torture_ops lock_busted_ops = { 147 .writelock = torture_lock_busted_write_lock, 148 .write_delay = torture_lock_busted_write_delay, 149 .task_boost = torture_boost_dummy, 150 .writeunlock = torture_lock_busted_write_unlock, 151 .readlock = NULL, 152 .read_delay = NULL, 153 .readunlock = NULL, 154 .name = "lock_busted" 155 }; 156 157 static DEFINE_SPINLOCK(torture_spinlock); 158 159 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 160 { 161 spin_lock(&torture_spinlock); 162 return 0; 163 } 164 165 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 166 { 167 const unsigned long shortdelay_us = 2; 168 const unsigned long longdelay_ms = 100; 169 170 /* We want a short delay mostly to emulate likely code, and 171 * we want a long delay occasionally to force massive contention. 172 */ 173 if (!(torture_random(trsp) % 174 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 175 mdelay(longdelay_ms); 176 if (!(torture_random(trsp) % 177 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 178 udelay(shortdelay_us); 179 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 180 torture_preempt_schedule(); /* Allow test to be preempted. */ 181 } 182 183 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 184 { 185 spin_unlock(&torture_spinlock); 186 } 187 188 static struct lock_torture_ops spin_lock_ops = { 189 .writelock = torture_spin_lock_write_lock, 190 .write_delay = torture_spin_lock_write_delay, 191 .task_boost = torture_boost_dummy, 192 .writeunlock = torture_spin_lock_write_unlock, 193 .readlock = NULL, 194 .read_delay = NULL, 195 .readunlock = NULL, 196 .name = "spin_lock" 197 }; 198 199 static int torture_spin_lock_write_lock_irq(void) 200 __acquires(torture_spinlock) 201 { 202 unsigned long flags; 203 204 spin_lock_irqsave(&torture_spinlock, flags); 205 cxt.cur_ops->flags = flags; 206 return 0; 207 } 208 209 static void torture_lock_spin_write_unlock_irq(void) 210 __releases(torture_spinlock) 211 { 212 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 213 } 214 215 static struct lock_torture_ops spin_lock_irq_ops = { 216 .writelock = torture_spin_lock_write_lock_irq, 217 .write_delay = torture_spin_lock_write_delay, 218 .task_boost = torture_boost_dummy, 219 .writeunlock = torture_lock_spin_write_unlock_irq, 220 .readlock = NULL, 221 .read_delay = NULL, 222 .readunlock = NULL, 223 .name = "spin_lock_irq" 224 }; 225 226 static DEFINE_RWLOCK(torture_rwlock); 227 228 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 229 { 230 write_lock(&torture_rwlock); 231 return 0; 232 } 233 234 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 235 { 236 const unsigned long shortdelay_us = 2; 237 const unsigned long longdelay_ms = 100; 238 239 /* We want a short delay mostly to emulate likely code, and 240 * we want a long delay occasionally to force massive contention. 241 */ 242 if (!(torture_random(trsp) % 243 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 244 mdelay(longdelay_ms); 245 else 246 udelay(shortdelay_us); 247 } 248 249 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 250 { 251 write_unlock(&torture_rwlock); 252 } 253 254 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 255 { 256 read_lock(&torture_rwlock); 257 return 0; 258 } 259 260 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 261 { 262 const unsigned long shortdelay_us = 10; 263 const unsigned long longdelay_ms = 100; 264 265 /* We want a short delay mostly to emulate likely code, and 266 * we want a long delay occasionally to force massive contention. 267 */ 268 if (!(torture_random(trsp) % 269 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 270 mdelay(longdelay_ms); 271 else 272 udelay(shortdelay_us); 273 } 274 275 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 276 { 277 read_unlock(&torture_rwlock); 278 } 279 280 static struct lock_torture_ops rw_lock_ops = { 281 .writelock = torture_rwlock_write_lock, 282 .write_delay = torture_rwlock_write_delay, 283 .task_boost = torture_boost_dummy, 284 .writeunlock = torture_rwlock_write_unlock, 285 .readlock = torture_rwlock_read_lock, 286 .read_delay = torture_rwlock_read_delay, 287 .readunlock = torture_rwlock_read_unlock, 288 .name = "rw_lock" 289 }; 290 291 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 292 { 293 unsigned long flags; 294 295 write_lock_irqsave(&torture_rwlock, flags); 296 cxt.cur_ops->flags = flags; 297 return 0; 298 } 299 300 static void torture_rwlock_write_unlock_irq(void) 301 __releases(torture_rwlock) 302 { 303 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 304 } 305 306 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 307 { 308 unsigned long flags; 309 310 read_lock_irqsave(&torture_rwlock, flags); 311 cxt.cur_ops->flags = flags; 312 return 0; 313 } 314 315 static void torture_rwlock_read_unlock_irq(void) 316 __releases(torture_rwlock) 317 { 318 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 319 } 320 321 static struct lock_torture_ops rw_lock_irq_ops = { 322 .writelock = torture_rwlock_write_lock_irq, 323 .write_delay = torture_rwlock_write_delay, 324 .task_boost = torture_boost_dummy, 325 .writeunlock = torture_rwlock_write_unlock_irq, 326 .readlock = torture_rwlock_read_lock_irq, 327 .read_delay = torture_rwlock_read_delay, 328 .readunlock = torture_rwlock_read_unlock_irq, 329 .name = "rw_lock_irq" 330 }; 331 332 static DEFINE_MUTEX(torture_mutex); 333 334 static int torture_mutex_lock(void) __acquires(torture_mutex) 335 { 336 mutex_lock(&torture_mutex); 337 return 0; 338 } 339 340 static void torture_mutex_delay(struct torture_random_state *trsp) 341 { 342 const unsigned long longdelay_ms = 100; 343 344 /* We want a long delay occasionally to force massive contention. */ 345 if (!(torture_random(trsp) % 346 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 347 mdelay(longdelay_ms * 5); 348 else 349 mdelay(longdelay_ms / 5); 350 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 351 torture_preempt_schedule(); /* Allow test to be preempted. */ 352 } 353 354 static void torture_mutex_unlock(void) __releases(torture_mutex) 355 { 356 mutex_unlock(&torture_mutex); 357 } 358 359 static struct lock_torture_ops mutex_lock_ops = { 360 .writelock = torture_mutex_lock, 361 .write_delay = torture_mutex_delay, 362 .task_boost = torture_boost_dummy, 363 .writeunlock = torture_mutex_unlock, 364 .readlock = NULL, 365 .read_delay = NULL, 366 .readunlock = NULL, 367 .name = "mutex_lock" 368 }; 369 370 #include <linux/ww_mutex.h> 371 static DEFINE_WD_CLASS(torture_ww_class); 372 static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); 373 static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); 374 static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); 375 376 static int torture_ww_mutex_lock(void) 377 __acquires(torture_ww_mutex_0) 378 __acquires(torture_ww_mutex_1) 379 __acquires(torture_ww_mutex_2) 380 { 381 LIST_HEAD(list); 382 struct reorder_lock { 383 struct list_head link; 384 struct ww_mutex *lock; 385 } locks[3], *ll, *ln; 386 struct ww_acquire_ctx ctx; 387 388 locks[0].lock = &torture_ww_mutex_0; 389 list_add(&locks[0].link, &list); 390 391 locks[1].lock = &torture_ww_mutex_1; 392 list_add(&locks[1].link, &list); 393 394 locks[2].lock = &torture_ww_mutex_2; 395 list_add(&locks[2].link, &list); 396 397 ww_acquire_init(&ctx, &torture_ww_class); 398 399 list_for_each_entry(ll, &list, link) { 400 int err; 401 402 err = ww_mutex_lock(ll->lock, &ctx); 403 if (!err) 404 continue; 405 406 ln = ll; 407 list_for_each_entry_continue_reverse(ln, &list, link) 408 ww_mutex_unlock(ln->lock); 409 410 if (err != -EDEADLK) 411 return err; 412 413 ww_mutex_lock_slow(ll->lock, &ctx); 414 list_move(&ll->link, &list); 415 } 416 417 ww_acquire_fini(&ctx); 418 return 0; 419 } 420 421 static void torture_ww_mutex_unlock(void) 422 __releases(torture_ww_mutex_0) 423 __releases(torture_ww_mutex_1) 424 __releases(torture_ww_mutex_2) 425 { 426 ww_mutex_unlock(&torture_ww_mutex_0); 427 ww_mutex_unlock(&torture_ww_mutex_1); 428 ww_mutex_unlock(&torture_ww_mutex_2); 429 } 430 431 static struct lock_torture_ops ww_mutex_lock_ops = { 432 .writelock = torture_ww_mutex_lock, 433 .write_delay = torture_mutex_delay, 434 .task_boost = torture_boost_dummy, 435 .writeunlock = torture_ww_mutex_unlock, 436 .readlock = NULL, 437 .read_delay = NULL, 438 .readunlock = NULL, 439 .name = "ww_mutex_lock" 440 }; 441 442 #ifdef CONFIG_RT_MUTEXES 443 static DEFINE_RT_MUTEX(torture_rtmutex); 444 445 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 446 { 447 rt_mutex_lock(&torture_rtmutex); 448 return 0; 449 } 450 451 static void torture_rtmutex_boost(struct torture_random_state *trsp) 452 { 453 int policy; 454 struct sched_param param; 455 const unsigned int factor = 50000; /* yes, quite arbitrary */ 456 457 if (!rt_task(current)) { 458 /* 459 * Boost priority once every ~50k operations. When the 460 * task tries to take the lock, the rtmutex it will account 461 * for the new priority, and do any corresponding pi-dance. 462 */ 463 if (trsp && !(torture_random(trsp) % 464 (cxt.nrealwriters_stress * factor))) { 465 policy = SCHED_FIFO; 466 param.sched_priority = MAX_RT_PRIO - 1; 467 } else /* common case, do nothing */ 468 return; 469 } else { 470 /* 471 * The task will remain boosted for another ~500k operations, 472 * then restored back to its original prio, and so forth. 473 * 474 * When @trsp is nil, we want to force-reset the task for 475 * stopping the kthread. 476 */ 477 if (!trsp || !(torture_random(trsp) % 478 (cxt.nrealwriters_stress * factor * 2))) { 479 policy = SCHED_NORMAL; 480 param.sched_priority = 0; 481 } else /* common case, do nothing */ 482 return; 483 } 484 485 sched_setscheduler_nocheck(current, policy, ¶m); 486 } 487 488 static void torture_rtmutex_delay(struct torture_random_state *trsp) 489 { 490 const unsigned long shortdelay_us = 2; 491 const unsigned long longdelay_ms = 100; 492 493 /* 494 * We want a short delay mostly to emulate likely code, and 495 * we want a long delay occasionally to force massive contention. 496 */ 497 if (!(torture_random(trsp) % 498 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 499 mdelay(longdelay_ms); 500 if (!(torture_random(trsp) % 501 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 502 udelay(shortdelay_us); 503 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 504 torture_preempt_schedule(); /* Allow test to be preempted. */ 505 } 506 507 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 508 { 509 rt_mutex_unlock(&torture_rtmutex); 510 } 511 512 static struct lock_torture_ops rtmutex_lock_ops = { 513 .writelock = torture_rtmutex_lock, 514 .write_delay = torture_rtmutex_delay, 515 .task_boost = torture_rtmutex_boost, 516 .writeunlock = torture_rtmutex_unlock, 517 .readlock = NULL, 518 .read_delay = NULL, 519 .readunlock = NULL, 520 .name = "rtmutex_lock" 521 }; 522 #endif 523 524 static DECLARE_RWSEM(torture_rwsem); 525 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 526 { 527 down_write(&torture_rwsem); 528 return 0; 529 } 530 531 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 532 { 533 const unsigned long longdelay_ms = 100; 534 535 /* We want a long delay occasionally to force massive contention. */ 536 if (!(torture_random(trsp) % 537 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 538 mdelay(longdelay_ms * 10); 539 else 540 mdelay(longdelay_ms / 10); 541 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 542 torture_preempt_schedule(); /* Allow test to be preempted. */ 543 } 544 545 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 546 { 547 up_write(&torture_rwsem); 548 } 549 550 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 551 { 552 down_read(&torture_rwsem); 553 return 0; 554 } 555 556 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 557 { 558 const unsigned long longdelay_ms = 100; 559 560 /* We want a long delay occasionally to force massive contention. */ 561 if (!(torture_random(trsp) % 562 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 563 mdelay(longdelay_ms * 2); 564 else 565 mdelay(longdelay_ms / 2); 566 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 567 torture_preempt_schedule(); /* Allow test to be preempted. */ 568 } 569 570 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 571 { 572 up_read(&torture_rwsem); 573 } 574 575 static struct lock_torture_ops rwsem_lock_ops = { 576 .writelock = torture_rwsem_down_write, 577 .write_delay = torture_rwsem_write_delay, 578 .task_boost = torture_boost_dummy, 579 .writeunlock = torture_rwsem_up_write, 580 .readlock = torture_rwsem_down_read, 581 .read_delay = torture_rwsem_read_delay, 582 .readunlock = torture_rwsem_up_read, 583 .name = "rwsem_lock" 584 }; 585 586 #include <linux/percpu-rwsem.h> 587 static struct percpu_rw_semaphore pcpu_rwsem; 588 589 void torture_percpu_rwsem_init(void) 590 { 591 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 592 } 593 594 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 595 { 596 percpu_down_write(&pcpu_rwsem); 597 return 0; 598 } 599 600 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 601 { 602 percpu_up_write(&pcpu_rwsem); 603 } 604 605 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 606 { 607 percpu_down_read(&pcpu_rwsem); 608 return 0; 609 } 610 611 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 612 { 613 percpu_up_read(&pcpu_rwsem); 614 } 615 616 static struct lock_torture_ops percpu_rwsem_lock_ops = { 617 .init = torture_percpu_rwsem_init, 618 .writelock = torture_percpu_rwsem_down_write, 619 .write_delay = torture_rwsem_write_delay, 620 .task_boost = torture_boost_dummy, 621 .writeunlock = torture_percpu_rwsem_up_write, 622 .readlock = torture_percpu_rwsem_down_read, 623 .read_delay = torture_rwsem_read_delay, 624 .readunlock = torture_percpu_rwsem_up_read, 625 .name = "percpu_rwsem_lock" 626 }; 627 628 /* 629 * Lock torture writer kthread. Repeatedly acquires and releases 630 * the lock, checking for duplicate acquisitions. 631 */ 632 static int lock_torture_writer(void *arg) 633 { 634 struct lock_stress_stats *lwsp = arg; 635 static DEFINE_TORTURE_RANDOM(rand); 636 637 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 638 set_user_nice(current, MAX_NICE); 639 640 do { 641 if ((torture_random(&rand) & 0xfffff) == 0) 642 schedule_timeout_uninterruptible(1); 643 644 cxt.cur_ops->task_boost(&rand); 645 cxt.cur_ops->writelock(); 646 if (WARN_ON_ONCE(lock_is_write_held)) 647 lwsp->n_lock_fail++; 648 lock_is_write_held = 1; 649 if (WARN_ON_ONCE(lock_is_read_held)) 650 lwsp->n_lock_fail++; /* rare, but... */ 651 652 lwsp->n_lock_acquired++; 653 cxt.cur_ops->write_delay(&rand); 654 lock_is_write_held = 0; 655 cxt.cur_ops->writeunlock(); 656 657 stutter_wait("lock_torture_writer"); 658 } while (!torture_must_stop()); 659 660 cxt.cur_ops->task_boost(NULL); /* reset prio */ 661 torture_kthread_stopping("lock_torture_writer"); 662 return 0; 663 } 664 665 /* 666 * Lock torture reader kthread. Repeatedly acquires and releases 667 * the reader lock. 668 */ 669 static int lock_torture_reader(void *arg) 670 { 671 struct lock_stress_stats *lrsp = arg; 672 static DEFINE_TORTURE_RANDOM(rand); 673 674 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 675 set_user_nice(current, MAX_NICE); 676 677 do { 678 if ((torture_random(&rand) & 0xfffff) == 0) 679 schedule_timeout_uninterruptible(1); 680 681 cxt.cur_ops->readlock(); 682 lock_is_read_held = 1; 683 if (WARN_ON_ONCE(lock_is_write_held)) 684 lrsp->n_lock_fail++; /* rare, but... */ 685 686 lrsp->n_lock_acquired++; 687 cxt.cur_ops->read_delay(&rand); 688 lock_is_read_held = 0; 689 cxt.cur_ops->readunlock(); 690 691 stutter_wait("lock_torture_reader"); 692 } while (!torture_must_stop()); 693 torture_kthread_stopping("lock_torture_reader"); 694 return 0; 695 } 696 697 /* 698 * Create an lock-torture-statistics message in the specified buffer. 699 */ 700 static void __torture_print_stats(char *page, 701 struct lock_stress_stats *statp, bool write) 702 { 703 bool fail = 0; 704 int i, n_stress; 705 long max = 0, min = statp ? statp[0].n_lock_acquired : 0; 706 long long sum = 0; 707 708 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 709 for (i = 0; i < n_stress; i++) { 710 if (statp[i].n_lock_fail) 711 fail = true; 712 sum += statp[i].n_lock_acquired; 713 if (max < statp[i].n_lock_fail) 714 max = statp[i].n_lock_fail; 715 if (min > statp[i].n_lock_fail) 716 min = statp[i].n_lock_fail; 717 } 718 page += sprintf(page, 719 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 720 write ? "Writes" : "Reads ", 721 sum, max, min, max / 2 > min ? "???" : "", 722 fail, fail ? "!!!" : ""); 723 if (fail) 724 atomic_inc(&cxt.n_lock_torture_errors); 725 } 726 727 /* 728 * Print torture statistics. Caller must ensure that there is only one 729 * call to this function at a given time!!! This is normally accomplished 730 * by relying on the module system to only have one copy of the module 731 * loaded, and then by giving the lock_torture_stats kthread full control 732 * (or the init/cleanup functions when lock_torture_stats thread is not 733 * running). 734 */ 735 static void lock_torture_stats_print(void) 736 { 737 int size = cxt.nrealwriters_stress * 200 + 8192; 738 char *buf; 739 740 if (cxt.cur_ops->readlock) 741 size += cxt.nrealreaders_stress * 200 + 8192; 742 743 buf = kmalloc(size, GFP_KERNEL); 744 if (!buf) { 745 pr_err("lock_torture_stats_print: Out of memory, need: %d", 746 size); 747 return; 748 } 749 750 __torture_print_stats(buf, cxt.lwsa, true); 751 pr_alert("%s", buf); 752 kfree(buf); 753 754 if (cxt.cur_ops->readlock) { 755 buf = kmalloc(size, GFP_KERNEL); 756 if (!buf) { 757 pr_err("lock_torture_stats_print: Out of memory, need: %d", 758 size); 759 return; 760 } 761 762 __torture_print_stats(buf, cxt.lrsa, false); 763 pr_alert("%s", buf); 764 kfree(buf); 765 } 766 } 767 768 /* 769 * Periodically prints torture statistics, if periodic statistics printing 770 * was specified via the stat_interval module parameter. 771 * 772 * No need to worry about fullstop here, since this one doesn't reference 773 * volatile state or register callbacks. 774 */ 775 static int lock_torture_stats(void *arg) 776 { 777 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 778 do { 779 schedule_timeout_interruptible(stat_interval * HZ); 780 lock_torture_stats_print(); 781 torture_shutdown_absorb("lock_torture_stats"); 782 } while (!torture_must_stop()); 783 torture_kthread_stopping("lock_torture_stats"); 784 return 0; 785 } 786 787 static inline void 788 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 789 const char *tag) 790 { 791 pr_alert("%s" TORTURE_FLAG 792 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 793 torture_type, tag, cxt.debug_lock ? " [debug]": "", 794 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 795 verbose, shuffle_interval, stutter, shutdown_secs, 796 onoff_interval, onoff_holdoff); 797 } 798 799 static void lock_torture_cleanup(void) 800 { 801 int i; 802 803 if (torture_cleanup_begin()) 804 return; 805 806 /* 807 * Indicates early cleanup, meaning that the test has not run, 808 * such as when passing bogus args when loading the module. As 809 * such, only perform the underlying torture-specific cleanups, 810 * and avoid anything related to locktorture. 811 */ 812 if (!cxt.lwsa && !cxt.lrsa) 813 goto end; 814 815 if (writer_tasks) { 816 for (i = 0; i < cxt.nrealwriters_stress; i++) 817 torture_stop_kthread(lock_torture_writer, 818 writer_tasks[i]); 819 kfree(writer_tasks); 820 writer_tasks = NULL; 821 } 822 823 if (reader_tasks) { 824 for (i = 0; i < cxt.nrealreaders_stress; i++) 825 torture_stop_kthread(lock_torture_reader, 826 reader_tasks[i]); 827 kfree(reader_tasks); 828 reader_tasks = NULL; 829 } 830 831 torture_stop_kthread(lock_torture_stats, stats_task); 832 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 833 834 if (atomic_read(&cxt.n_lock_torture_errors)) 835 lock_torture_print_module_parms(cxt.cur_ops, 836 "End of test: FAILURE"); 837 else if (torture_onoff_failures()) 838 lock_torture_print_module_parms(cxt.cur_ops, 839 "End of test: LOCK_HOTPLUG"); 840 else 841 lock_torture_print_module_parms(cxt.cur_ops, 842 "End of test: SUCCESS"); 843 844 kfree(cxt.lwsa); 845 kfree(cxt.lrsa); 846 847 end: 848 torture_cleanup_end(); 849 } 850 851 static int __init lock_torture_init(void) 852 { 853 int i, j; 854 int firsterr = 0; 855 static struct lock_torture_ops *torture_ops[] = { 856 &lock_busted_ops, 857 &spin_lock_ops, &spin_lock_irq_ops, 858 &rw_lock_ops, &rw_lock_irq_ops, 859 &mutex_lock_ops, 860 &ww_mutex_lock_ops, 861 #ifdef CONFIG_RT_MUTEXES 862 &rtmutex_lock_ops, 863 #endif 864 &rwsem_lock_ops, 865 &percpu_rwsem_lock_ops, 866 }; 867 868 if (!torture_init_begin(torture_type, verbose)) 869 return -EBUSY; 870 871 /* Process args and tell the world that the torturer is on the job. */ 872 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 873 cxt.cur_ops = torture_ops[i]; 874 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 875 break; 876 } 877 if (i == ARRAY_SIZE(torture_ops)) { 878 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 879 torture_type); 880 pr_alert("lock-torture types:"); 881 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 882 pr_alert(" %s", torture_ops[i]->name); 883 pr_alert("\n"); 884 firsterr = -EINVAL; 885 goto unwind; 886 } 887 888 if (nwriters_stress == 0 && nreaders_stress == 0) { 889 pr_alert("lock-torture: must run at least one locking thread\n"); 890 firsterr = -EINVAL; 891 goto unwind; 892 } 893 894 if (cxt.cur_ops->init) 895 cxt.cur_ops->init(); 896 897 if (nwriters_stress >= 0) 898 cxt.nrealwriters_stress = nwriters_stress; 899 else 900 cxt.nrealwriters_stress = 2 * num_online_cpus(); 901 902 #ifdef CONFIG_DEBUG_MUTEXES 903 if (strncmp(torture_type, "mutex", 5) == 0) 904 cxt.debug_lock = true; 905 #endif 906 #ifdef CONFIG_DEBUG_RT_MUTEXES 907 if (strncmp(torture_type, "rtmutex", 7) == 0) 908 cxt.debug_lock = true; 909 #endif 910 #ifdef CONFIG_DEBUG_SPINLOCK 911 if ((strncmp(torture_type, "spin", 4) == 0) || 912 (strncmp(torture_type, "rw_lock", 7) == 0)) 913 cxt.debug_lock = true; 914 #endif 915 916 /* Initialize the statistics so that each run gets its own numbers. */ 917 if (nwriters_stress) { 918 lock_is_write_held = 0; 919 cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress, 920 sizeof(*cxt.lwsa), 921 GFP_KERNEL); 922 if (cxt.lwsa == NULL) { 923 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 924 firsterr = -ENOMEM; 925 goto unwind; 926 } 927 928 for (i = 0; i < cxt.nrealwriters_stress; i++) { 929 cxt.lwsa[i].n_lock_fail = 0; 930 cxt.lwsa[i].n_lock_acquired = 0; 931 } 932 } 933 934 if (cxt.cur_ops->readlock) { 935 if (nreaders_stress >= 0) 936 cxt.nrealreaders_stress = nreaders_stress; 937 else { 938 /* 939 * By default distribute evenly the number of 940 * readers and writers. We still run the same number 941 * of threads as the writer-only locks default. 942 */ 943 if (nwriters_stress < 0) /* user doesn't care */ 944 cxt.nrealwriters_stress = num_online_cpus(); 945 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 946 } 947 948 if (nreaders_stress) { 949 lock_is_read_held = 0; 950 cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress, 951 sizeof(*cxt.lrsa), 952 GFP_KERNEL); 953 if (cxt.lrsa == NULL) { 954 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 955 firsterr = -ENOMEM; 956 kfree(cxt.lwsa); 957 cxt.lwsa = NULL; 958 goto unwind; 959 } 960 961 for (i = 0; i < cxt.nrealreaders_stress; i++) { 962 cxt.lrsa[i].n_lock_fail = 0; 963 cxt.lrsa[i].n_lock_acquired = 0; 964 } 965 } 966 } 967 968 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 969 970 /* Prepare torture context. */ 971 if (onoff_interval > 0) { 972 firsterr = torture_onoff_init(onoff_holdoff * HZ, 973 onoff_interval * HZ); 974 if (firsterr) 975 goto unwind; 976 } 977 if (shuffle_interval > 0) { 978 firsterr = torture_shuffle_init(shuffle_interval); 979 if (firsterr) 980 goto unwind; 981 } 982 if (shutdown_secs > 0) { 983 firsterr = torture_shutdown_init(shutdown_secs, 984 lock_torture_cleanup); 985 if (firsterr) 986 goto unwind; 987 } 988 if (stutter > 0) { 989 firsterr = torture_stutter_init(stutter); 990 if (firsterr) 991 goto unwind; 992 } 993 994 if (nwriters_stress) { 995 writer_tasks = kcalloc(cxt.nrealwriters_stress, 996 sizeof(writer_tasks[0]), 997 GFP_KERNEL); 998 if (writer_tasks == NULL) { 999 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 1000 firsterr = -ENOMEM; 1001 goto unwind; 1002 } 1003 } 1004 1005 if (cxt.cur_ops->readlock) { 1006 reader_tasks = kcalloc(cxt.nrealreaders_stress, 1007 sizeof(reader_tasks[0]), 1008 GFP_KERNEL); 1009 if (reader_tasks == NULL) { 1010 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 1011 kfree(writer_tasks); 1012 writer_tasks = NULL; 1013 firsterr = -ENOMEM; 1014 goto unwind; 1015 } 1016 } 1017 1018 /* 1019 * Create the kthreads and start torturing (oh, those poor little locks). 1020 * 1021 * TODO: Note that we interleave writers with readers, giving writers a 1022 * slight advantage, by creating its kthread first. This can be modified 1023 * for very specific needs, or even let the user choose the policy, if 1024 * ever wanted. 1025 */ 1026 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 1027 j < cxt.nrealreaders_stress; i++, j++) { 1028 if (i >= cxt.nrealwriters_stress) 1029 goto create_reader; 1030 1031 /* Create writer. */ 1032 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 1033 writer_tasks[i]); 1034 if (firsterr) 1035 goto unwind; 1036 1037 create_reader: 1038 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 1039 continue; 1040 /* Create reader. */ 1041 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 1042 reader_tasks[j]); 1043 if (firsterr) 1044 goto unwind; 1045 } 1046 if (stat_interval > 0) { 1047 firsterr = torture_create_kthread(lock_torture_stats, NULL, 1048 stats_task); 1049 if (firsterr) 1050 goto unwind; 1051 } 1052 torture_init_end(); 1053 return 0; 1054 1055 unwind: 1056 torture_init_end(); 1057 lock_torture_cleanup(); 1058 return firsterr; 1059 } 1060 1061 module_init(lock_torture_init); 1062 module_exit(lock_torture_cleanup); 1063