1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/kthread.h> 27 #include <linux/sched/rt.h> 28 #include <linux/spinlock.h> 29 #include <linux/rwlock.h> 30 #include <linux/mutex.h> 31 #include <linux/rwsem.h> 32 #include <linux/smp.h> 33 #include <linux/interrupt.h> 34 #include <linux/sched.h> 35 #include <linux/atomic.h> 36 #include <linux/moduleparam.h> 37 #include <linux/delay.h> 38 #include <linux/slab.h> 39 #include <linux/percpu-rwsem.h> 40 #include <linux/torture.h> 41 42 MODULE_LICENSE("GPL"); 43 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 44 45 torture_param(int, nwriters_stress, -1, 46 "Number of write-locking stress-test threads"); 47 torture_param(int, nreaders_stress, -1, 48 "Number of read-locking stress-test threads"); 49 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 50 torture_param(int, onoff_interval, 0, 51 "Time between CPU hotplugs (s), 0=disable"); 52 torture_param(int, shuffle_interval, 3, 53 "Number of jiffies between shuffles, 0=disable"); 54 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 55 torture_param(int, stat_interval, 60, 56 "Number of seconds between stats printk()s"); 57 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 58 torture_param(bool, verbose, true, 59 "Enable verbose debugging printk()s"); 60 61 static char *torture_type = "spin_lock"; 62 module_param(torture_type, charp, 0444); 63 MODULE_PARM_DESC(torture_type, 64 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 65 66 static struct task_struct *stats_task; 67 static struct task_struct **writer_tasks; 68 static struct task_struct **reader_tasks; 69 70 static bool lock_is_write_held; 71 static bool lock_is_read_held; 72 73 struct lock_stress_stats { 74 long n_lock_fail; 75 long n_lock_acquired; 76 }; 77 78 #if defined(MODULE) 79 #define LOCKTORTURE_RUNNABLE_INIT 1 80 #else 81 #define LOCKTORTURE_RUNNABLE_INIT 0 82 #endif 83 int torture_runnable = LOCKTORTURE_RUNNABLE_INIT; 84 module_param(torture_runnable, int, 0444); 85 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); 86 87 /* Forward reference. */ 88 static void lock_torture_cleanup(void); 89 90 /* 91 * Operations vector for selecting different types of tests. 92 */ 93 struct lock_torture_ops { 94 void (*init)(void); 95 int (*writelock)(void); 96 void (*write_delay)(struct torture_random_state *trsp); 97 void (*task_boost)(struct torture_random_state *trsp); 98 void (*writeunlock)(void); 99 int (*readlock)(void); 100 void (*read_delay)(struct torture_random_state *trsp); 101 void (*readunlock)(void); 102 103 unsigned long flags; /* for irq spinlocks */ 104 const char *name; 105 }; 106 107 struct lock_torture_cxt { 108 int nrealwriters_stress; 109 int nrealreaders_stress; 110 bool debug_lock; 111 atomic_t n_lock_torture_errors; 112 struct lock_torture_ops *cur_ops; 113 struct lock_stress_stats *lwsa; /* writer statistics */ 114 struct lock_stress_stats *lrsa; /* reader statistics */ 115 }; 116 static struct lock_torture_cxt cxt = { 0, 0, false, 117 ATOMIC_INIT(0), 118 NULL, NULL}; 119 /* 120 * Definitions for lock torture testing. 121 */ 122 123 static int torture_lock_busted_write_lock(void) 124 { 125 return 0; /* BUGGY, do not use in real life!!! */ 126 } 127 128 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 129 { 130 const unsigned long longdelay_ms = 100; 131 132 /* We want a long delay occasionally to force massive contention. */ 133 if (!(torture_random(trsp) % 134 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 135 mdelay(longdelay_ms); 136 #ifdef CONFIG_PREEMPT 137 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 138 preempt_schedule(); /* Allow test to be preempted. */ 139 #endif 140 } 141 142 static void torture_lock_busted_write_unlock(void) 143 { 144 /* BUGGY, do not use in real life!!! */ 145 } 146 147 static void torture_boost_dummy(struct torture_random_state *trsp) 148 { 149 /* Only rtmutexes care about priority */ 150 } 151 152 static struct lock_torture_ops lock_busted_ops = { 153 .writelock = torture_lock_busted_write_lock, 154 .write_delay = torture_lock_busted_write_delay, 155 .task_boost = torture_boost_dummy, 156 .writeunlock = torture_lock_busted_write_unlock, 157 .readlock = NULL, 158 .read_delay = NULL, 159 .readunlock = NULL, 160 .name = "lock_busted" 161 }; 162 163 static DEFINE_SPINLOCK(torture_spinlock); 164 165 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 166 { 167 spin_lock(&torture_spinlock); 168 return 0; 169 } 170 171 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 172 { 173 const unsigned long shortdelay_us = 2; 174 const unsigned long longdelay_ms = 100; 175 176 /* We want a short delay mostly to emulate likely code, and 177 * we want a long delay occasionally to force massive contention. 178 */ 179 if (!(torture_random(trsp) % 180 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 181 mdelay(longdelay_ms); 182 if (!(torture_random(trsp) % 183 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 184 udelay(shortdelay_us); 185 #ifdef CONFIG_PREEMPT 186 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 187 preempt_schedule(); /* Allow test to be preempted. */ 188 #endif 189 } 190 191 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 192 { 193 spin_unlock(&torture_spinlock); 194 } 195 196 static struct lock_torture_ops spin_lock_ops = { 197 .writelock = torture_spin_lock_write_lock, 198 .write_delay = torture_spin_lock_write_delay, 199 .task_boost = torture_boost_dummy, 200 .writeunlock = torture_spin_lock_write_unlock, 201 .readlock = NULL, 202 .read_delay = NULL, 203 .readunlock = NULL, 204 .name = "spin_lock" 205 }; 206 207 static int torture_spin_lock_write_lock_irq(void) 208 __acquires(torture_spinlock) 209 { 210 unsigned long flags; 211 212 spin_lock_irqsave(&torture_spinlock, flags); 213 cxt.cur_ops->flags = flags; 214 return 0; 215 } 216 217 static void torture_lock_spin_write_unlock_irq(void) 218 __releases(torture_spinlock) 219 { 220 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 221 } 222 223 static struct lock_torture_ops spin_lock_irq_ops = { 224 .writelock = torture_spin_lock_write_lock_irq, 225 .write_delay = torture_spin_lock_write_delay, 226 .task_boost = torture_boost_dummy, 227 .writeunlock = torture_lock_spin_write_unlock_irq, 228 .readlock = NULL, 229 .read_delay = NULL, 230 .readunlock = NULL, 231 .name = "spin_lock_irq" 232 }; 233 234 static DEFINE_RWLOCK(torture_rwlock); 235 236 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 237 { 238 write_lock(&torture_rwlock); 239 return 0; 240 } 241 242 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 243 { 244 const unsigned long shortdelay_us = 2; 245 const unsigned long longdelay_ms = 100; 246 247 /* We want a short delay mostly to emulate likely code, and 248 * we want a long delay occasionally to force massive contention. 249 */ 250 if (!(torture_random(trsp) % 251 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 252 mdelay(longdelay_ms); 253 else 254 udelay(shortdelay_us); 255 } 256 257 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 258 { 259 write_unlock(&torture_rwlock); 260 } 261 262 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 263 { 264 read_lock(&torture_rwlock); 265 return 0; 266 } 267 268 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 269 { 270 const unsigned long shortdelay_us = 10; 271 const unsigned long longdelay_ms = 100; 272 273 /* We want a short delay mostly to emulate likely code, and 274 * we want a long delay occasionally to force massive contention. 275 */ 276 if (!(torture_random(trsp) % 277 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 278 mdelay(longdelay_ms); 279 else 280 udelay(shortdelay_us); 281 } 282 283 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 284 { 285 read_unlock(&torture_rwlock); 286 } 287 288 static struct lock_torture_ops rw_lock_ops = { 289 .writelock = torture_rwlock_write_lock, 290 .write_delay = torture_rwlock_write_delay, 291 .task_boost = torture_boost_dummy, 292 .writeunlock = torture_rwlock_write_unlock, 293 .readlock = torture_rwlock_read_lock, 294 .read_delay = torture_rwlock_read_delay, 295 .readunlock = torture_rwlock_read_unlock, 296 .name = "rw_lock" 297 }; 298 299 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 300 { 301 unsigned long flags; 302 303 write_lock_irqsave(&torture_rwlock, flags); 304 cxt.cur_ops->flags = flags; 305 return 0; 306 } 307 308 static void torture_rwlock_write_unlock_irq(void) 309 __releases(torture_rwlock) 310 { 311 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 312 } 313 314 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 315 { 316 unsigned long flags; 317 318 read_lock_irqsave(&torture_rwlock, flags); 319 cxt.cur_ops->flags = flags; 320 return 0; 321 } 322 323 static void torture_rwlock_read_unlock_irq(void) 324 __releases(torture_rwlock) 325 { 326 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 327 } 328 329 static struct lock_torture_ops rw_lock_irq_ops = { 330 .writelock = torture_rwlock_write_lock_irq, 331 .write_delay = torture_rwlock_write_delay, 332 .task_boost = torture_boost_dummy, 333 .writeunlock = torture_rwlock_write_unlock_irq, 334 .readlock = torture_rwlock_read_lock_irq, 335 .read_delay = torture_rwlock_read_delay, 336 .readunlock = torture_rwlock_read_unlock_irq, 337 .name = "rw_lock_irq" 338 }; 339 340 static DEFINE_MUTEX(torture_mutex); 341 342 static int torture_mutex_lock(void) __acquires(torture_mutex) 343 { 344 mutex_lock(&torture_mutex); 345 return 0; 346 } 347 348 static void torture_mutex_delay(struct torture_random_state *trsp) 349 { 350 const unsigned long longdelay_ms = 100; 351 352 /* We want a long delay occasionally to force massive contention. */ 353 if (!(torture_random(trsp) % 354 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 355 mdelay(longdelay_ms * 5); 356 else 357 mdelay(longdelay_ms / 5); 358 #ifdef CONFIG_PREEMPT 359 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 360 preempt_schedule(); /* Allow test to be preempted. */ 361 #endif 362 } 363 364 static void torture_mutex_unlock(void) __releases(torture_mutex) 365 { 366 mutex_unlock(&torture_mutex); 367 } 368 369 static struct lock_torture_ops mutex_lock_ops = { 370 .writelock = torture_mutex_lock, 371 .write_delay = torture_mutex_delay, 372 .task_boost = torture_boost_dummy, 373 .writeunlock = torture_mutex_unlock, 374 .readlock = NULL, 375 .read_delay = NULL, 376 .readunlock = NULL, 377 .name = "mutex_lock" 378 }; 379 380 #ifdef CONFIG_RT_MUTEXES 381 static DEFINE_RT_MUTEX(torture_rtmutex); 382 383 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 384 { 385 rt_mutex_lock(&torture_rtmutex); 386 return 0; 387 } 388 389 static void torture_rtmutex_boost(struct torture_random_state *trsp) 390 { 391 int policy; 392 struct sched_param param; 393 const unsigned int factor = 50000; /* yes, quite arbitrary */ 394 395 if (!rt_task(current)) { 396 /* 397 * (1) Boost priority once every ~50k operations. When the 398 * task tries to take the lock, the rtmutex it will account 399 * for the new priority, and do any corresponding pi-dance. 400 */ 401 if (!(torture_random(trsp) % 402 (cxt.nrealwriters_stress * factor))) { 403 policy = SCHED_FIFO; 404 param.sched_priority = MAX_RT_PRIO - 1; 405 } else /* common case, do nothing */ 406 return; 407 } else { 408 /* 409 * The task will remain boosted for another ~500k operations, 410 * then restored back to its original prio, and so forth. 411 * 412 * When @trsp is nil, we want to force-reset the task for 413 * stopping the kthread. 414 */ 415 if (!trsp || !(torture_random(trsp) % 416 (cxt.nrealwriters_stress * factor * 2))) { 417 policy = SCHED_NORMAL; 418 param.sched_priority = 0; 419 } else /* common case, do nothing */ 420 return; 421 } 422 423 sched_setscheduler_nocheck(current, policy, ¶m); 424 } 425 426 static void torture_rtmutex_delay(struct torture_random_state *trsp) 427 { 428 const unsigned long shortdelay_us = 2; 429 const unsigned long longdelay_ms = 100; 430 431 /* 432 * We want a short delay mostly to emulate likely code, and 433 * we want a long delay occasionally to force massive contention. 434 */ 435 if (!(torture_random(trsp) % 436 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 437 mdelay(longdelay_ms); 438 if (!(torture_random(trsp) % 439 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 440 udelay(shortdelay_us); 441 #ifdef CONFIG_PREEMPT 442 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 443 preempt_schedule(); /* Allow test to be preempted. */ 444 #endif 445 } 446 447 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 448 { 449 rt_mutex_unlock(&torture_rtmutex); 450 } 451 452 static struct lock_torture_ops rtmutex_lock_ops = { 453 .writelock = torture_rtmutex_lock, 454 .write_delay = torture_rtmutex_delay, 455 .task_boost = torture_rtmutex_boost, 456 .writeunlock = torture_rtmutex_unlock, 457 .readlock = NULL, 458 .read_delay = NULL, 459 .readunlock = NULL, 460 .name = "rtmutex_lock" 461 }; 462 #endif 463 464 static DECLARE_RWSEM(torture_rwsem); 465 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 466 { 467 down_write(&torture_rwsem); 468 return 0; 469 } 470 471 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 472 { 473 const unsigned long longdelay_ms = 100; 474 475 /* We want a long delay occasionally to force massive contention. */ 476 if (!(torture_random(trsp) % 477 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 478 mdelay(longdelay_ms * 10); 479 else 480 mdelay(longdelay_ms / 10); 481 #ifdef CONFIG_PREEMPT 482 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 483 preempt_schedule(); /* Allow test to be preempted. */ 484 #endif 485 } 486 487 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 488 { 489 up_write(&torture_rwsem); 490 } 491 492 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 493 { 494 down_read(&torture_rwsem); 495 return 0; 496 } 497 498 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 499 { 500 const unsigned long longdelay_ms = 100; 501 502 /* We want a long delay occasionally to force massive contention. */ 503 if (!(torture_random(trsp) % 504 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 505 mdelay(longdelay_ms * 2); 506 else 507 mdelay(longdelay_ms / 2); 508 #ifdef CONFIG_PREEMPT 509 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 510 preempt_schedule(); /* Allow test to be preempted. */ 511 #endif 512 } 513 514 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 515 { 516 up_read(&torture_rwsem); 517 } 518 519 static struct lock_torture_ops rwsem_lock_ops = { 520 .writelock = torture_rwsem_down_write, 521 .write_delay = torture_rwsem_write_delay, 522 .task_boost = torture_boost_dummy, 523 .writeunlock = torture_rwsem_up_write, 524 .readlock = torture_rwsem_down_read, 525 .read_delay = torture_rwsem_read_delay, 526 .readunlock = torture_rwsem_up_read, 527 .name = "rwsem_lock" 528 }; 529 530 #include <linux/percpu-rwsem.h> 531 static struct percpu_rw_semaphore pcpu_rwsem; 532 533 void torture_percpu_rwsem_init(void) 534 { 535 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 536 } 537 538 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 539 { 540 percpu_down_write(&pcpu_rwsem); 541 return 0; 542 } 543 544 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 545 { 546 percpu_up_write(&pcpu_rwsem); 547 } 548 549 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 550 { 551 percpu_down_read(&pcpu_rwsem); 552 return 0; 553 } 554 555 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 556 { 557 percpu_up_read(&pcpu_rwsem); 558 } 559 560 static struct lock_torture_ops percpu_rwsem_lock_ops = { 561 .init = torture_percpu_rwsem_init, 562 .writelock = torture_percpu_rwsem_down_write, 563 .write_delay = torture_rwsem_write_delay, 564 .task_boost = torture_boost_dummy, 565 .writeunlock = torture_percpu_rwsem_up_write, 566 .readlock = torture_percpu_rwsem_down_read, 567 .read_delay = torture_rwsem_read_delay, 568 .readunlock = torture_percpu_rwsem_up_read, 569 .name = "percpu_rwsem_lock" 570 }; 571 572 /* 573 * Lock torture writer kthread. Repeatedly acquires and releases 574 * the lock, checking for duplicate acquisitions. 575 */ 576 static int lock_torture_writer(void *arg) 577 { 578 struct lock_stress_stats *lwsp = arg; 579 static DEFINE_TORTURE_RANDOM(rand); 580 581 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 582 set_user_nice(current, MAX_NICE); 583 584 do { 585 if ((torture_random(&rand) & 0xfffff) == 0) 586 schedule_timeout_uninterruptible(1); 587 588 cxt.cur_ops->task_boost(&rand); 589 cxt.cur_ops->writelock(); 590 if (WARN_ON_ONCE(lock_is_write_held)) 591 lwsp->n_lock_fail++; 592 lock_is_write_held = 1; 593 if (WARN_ON_ONCE(lock_is_read_held)) 594 lwsp->n_lock_fail++; /* rare, but... */ 595 596 lwsp->n_lock_acquired++; 597 cxt.cur_ops->write_delay(&rand); 598 lock_is_write_held = 0; 599 cxt.cur_ops->writeunlock(); 600 601 stutter_wait("lock_torture_writer"); 602 } while (!torture_must_stop()); 603 604 cxt.cur_ops->task_boost(NULL); /* reset prio */ 605 torture_kthread_stopping("lock_torture_writer"); 606 return 0; 607 } 608 609 /* 610 * Lock torture reader kthread. Repeatedly acquires and releases 611 * the reader lock. 612 */ 613 static int lock_torture_reader(void *arg) 614 { 615 struct lock_stress_stats *lrsp = arg; 616 static DEFINE_TORTURE_RANDOM(rand); 617 618 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 619 set_user_nice(current, MAX_NICE); 620 621 do { 622 if ((torture_random(&rand) & 0xfffff) == 0) 623 schedule_timeout_uninterruptible(1); 624 625 cxt.cur_ops->readlock(); 626 lock_is_read_held = 1; 627 if (WARN_ON_ONCE(lock_is_write_held)) 628 lrsp->n_lock_fail++; /* rare, but... */ 629 630 lrsp->n_lock_acquired++; 631 cxt.cur_ops->read_delay(&rand); 632 lock_is_read_held = 0; 633 cxt.cur_ops->readunlock(); 634 635 stutter_wait("lock_torture_reader"); 636 } while (!torture_must_stop()); 637 torture_kthread_stopping("lock_torture_reader"); 638 return 0; 639 } 640 641 /* 642 * Create an lock-torture-statistics message in the specified buffer. 643 */ 644 static void __torture_print_stats(char *page, 645 struct lock_stress_stats *statp, bool write) 646 { 647 bool fail = 0; 648 int i, n_stress; 649 long max = 0; 650 long min = statp[0].n_lock_acquired; 651 long long sum = 0; 652 653 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 654 for (i = 0; i < n_stress; i++) { 655 if (statp[i].n_lock_fail) 656 fail = true; 657 sum += statp[i].n_lock_acquired; 658 if (max < statp[i].n_lock_fail) 659 max = statp[i].n_lock_fail; 660 if (min > statp[i].n_lock_fail) 661 min = statp[i].n_lock_fail; 662 } 663 page += sprintf(page, 664 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 665 write ? "Writes" : "Reads ", 666 sum, max, min, max / 2 > min ? "???" : "", 667 fail, fail ? "!!!" : ""); 668 if (fail) 669 atomic_inc(&cxt.n_lock_torture_errors); 670 } 671 672 /* 673 * Print torture statistics. Caller must ensure that there is only one 674 * call to this function at a given time!!! This is normally accomplished 675 * by relying on the module system to only have one copy of the module 676 * loaded, and then by giving the lock_torture_stats kthread full control 677 * (or the init/cleanup functions when lock_torture_stats thread is not 678 * running). 679 */ 680 static void lock_torture_stats_print(void) 681 { 682 int size = cxt.nrealwriters_stress * 200 + 8192; 683 char *buf; 684 685 if (cxt.cur_ops->readlock) 686 size += cxt.nrealreaders_stress * 200 + 8192; 687 688 buf = kmalloc(size, GFP_KERNEL); 689 if (!buf) { 690 pr_err("lock_torture_stats_print: Out of memory, need: %d", 691 size); 692 return; 693 } 694 695 __torture_print_stats(buf, cxt.lwsa, true); 696 pr_alert("%s", buf); 697 kfree(buf); 698 699 if (cxt.cur_ops->readlock) { 700 buf = kmalloc(size, GFP_KERNEL); 701 if (!buf) { 702 pr_err("lock_torture_stats_print: Out of memory, need: %d", 703 size); 704 return; 705 } 706 707 __torture_print_stats(buf, cxt.lrsa, false); 708 pr_alert("%s", buf); 709 kfree(buf); 710 } 711 } 712 713 /* 714 * Periodically prints torture statistics, if periodic statistics printing 715 * was specified via the stat_interval module parameter. 716 * 717 * No need to worry about fullstop here, since this one doesn't reference 718 * volatile state or register callbacks. 719 */ 720 static int lock_torture_stats(void *arg) 721 { 722 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 723 do { 724 schedule_timeout_interruptible(stat_interval * HZ); 725 lock_torture_stats_print(); 726 torture_shutdown_absorb("lock_torture_stats"); 727 } while (!torture_must_stop()); 728 torture_kthread_stopping("lock_torture_stats"); 729 return 0; 730 } 731 732 static inline void 733 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 734 const char *tag) 735 { 736 pr_alert("%s" TORTURE_FLAG 737 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 738 torture_type, tag, cxt.debug_lock ? " [debug]": "", 739 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 740 verbose, shuffle_interval, stutter, shutdown_secs, 741 onoff_interval, onoff_holdoff); 742 } 743 744 static void lock_torture_cleanup(void) 745 { 746 int i; 747 748 if (torture_cleanup_begin()) 749 return; 750 751 if (writer_tasks) { 752 for (i = 0; i < cxt.nrealwriters_stress; i++) 753 torture_stop_kthread(lock_torture_writer, 754 writer_tasks[i]); 755 kfree(writer_tasks); 756 writer_tasks = NULL; 757 } 758 759 if (reader_tasks) { 760 for (i = 0; i < cxt.nrealreaders_stress; i++) 761 torture_stop_kthread(lock_torture_reader, 762 reader_tasks[i]); 763 kfree(reader_tasks); 764 reader_tasks = NULL; 765 } 766 767 torture_stop_kthread(lock_torture_stats, stats_task); 768 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 769 770 if (atomic_read(&cxt.n_lock_torture_errors)) 771 lock_torture_print_module_parms(cxt.cur_ops, 772 "End of test: FAILURE"); 773 else if (torture_onoff_failures()) 774 lock_torture_print_module_parms(cxt.cur_ops, 775 "End of test: LOCK_HOTPLUG"); 776 else 777 lock_torture_print_module_parms(cxt.cur_ops, 778 "End of test: SUCCESS"); 779 torture_cleanup_end(); 780 } 781 782 static int __init lock_torture_init(void) 783 { 784 int i, j; 785 int firsterr = 0; 786 static struct lock_torture_ops *torture_ops[] = { 787 &lock_busted_ops, 788 &spin_lock_ops, &spin_lock_irq_ops, 789 &rw_lock_ops, &rw_lock_irq_ops, 790 &mutex_lock_ops, 791 #ifdef CONFIG_RT_MUTEXES 792 &rtmutex_lock_ops, 793 #endif 794 &rwsem_lock_ops, 795 &percpu_rwsem_lock_ops, 796 }; 797 798 if (!torture_init_begin(torture_type, verbose, &torture_runnable)) 799 return -EBUSY; 800 801 /* Process args and tell the world that the torturer is on the job. */ 802 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 803 cxt.cur_ops = torture_ops[i]; 804 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 805 break; 806 } 807 if (i == ARRAY_SIZE(torture_ops)) { 808 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 809 torture_type); 810 pr_alert("lock-torture types:"); 811 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 812 pr_alert(" %s", torture_ops[i]->name); 813 pr_alert("\n"); 814 firsterr = -EINVAL; 815 goto unwind; 816 } 817 if (cxt.cur_ops->init) 818 cxt.cur_ops->init(); 819 820 if (nwriters_stress >= 0) 821 cxt.nrealwriters_stress = nwriters_stress; 822 else 823 cxt.nrealwriters_stress = 2 * num_online_cpus(); 824 825 #ifdef CONFIG_DEBUG_MUTEXES 826 if (strncmp(torture_type, "mutex", 5) == 0) 827 cxt.debug_lock = true; 828 #endif 829 #ifdef CONFIG_DEBUG_RT_MUTEXES 830 if (strncmp(torture_type, "rtmutex", 7) == 0) 831 cxt.debug_lock = true; 832 #endif 833 #ifdef CONFIG_DEBUG_SPINLOCK 834 if ((strncmp(torture_type, "spin", 4) == 0) || 835 (strncmp(torture_type, "rw_lock", 7) == 0)) 836 cxt.debug_lock = true; 837 #endif 838 839 /* Initialize the statistics so that each run gets its own numbers. */ 840 841 lock_is_write_held = 0; 842 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); 843 if (cxt.lwsa == NULL) { 844 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 845 firsterr = -ENOMEM; 846 goto unwind; 847 } 848 for (i = 0; i < cxt.nrealwriters_stress; i++) { 849 cxt.lwsa[i].n_lock_fail = 0; 850 cxt.lwsa[i].n_lock_acquired = 0; 851 } 852 853 if (cxt.cur_ops->readlock) { 854 if (nreaders_stress >= 0) 855 cxt.nrealreaders_stress = nreaders_stress; 856 else { 857 /* 858 * By default distribute evenly the number of 859 * readers and writers. We still run the same number 860 * of threads as the writer-only locks default. 861 */ 862 if (nwriters_stress < 0) /* user doesn't care */ 863 cxt.nrealwriters_stress = num_online_cpus(); 864 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 865 } 866 867 lock_is_read_held = 0; 868 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); 869 if (cxt.lrsa == NULL) { 870 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 871 firsterr = -ENOMEM; 872 kfree(cxt.lwsa); 873 goto unwind; 874 } 875 876 for (i = 0; i < cxt.nrealreaders_stress; i++) { 877 cxt.lrsa[i].n_lock_fail = 0; 878 cxt.lrsa[i].n_lock_acquired = 0; 879 } 880 } 881 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 882 883 /* Prepare torture context. */ 884 if (onoff_interval > 0) { 885 firsterr = torture_onoff_init(onoff_holdoff * HZ, 886 onoff_interval * HZ); 887 if (firsterr) 888 goto unwind; 889 } 890 if (shuffle_interval > 0) { 891 firsterr = torture_shuffle_init(shuffle_interval); 892 if (firsterr) 893 goto unwind; 894 } 895 if (shutdown_secs > 0) { 896 firsterr = torture_shutdown_init(shutdown_secs, 897 lock_torture_cleanup); 898 if (firsterr) 899 goto unwind; 900 } 901 if (stutter > 0) { 902 firsterr = torture_stutter_init(stutter); 903 if (firsterr) 904 goto unwind; 905 } 906 907 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), 908 GFP_KERNEL); 909 if (writer_tasks == NULL) { 910 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 911 firsterr = -ENOMEM; 912 goto unwind; 913 } 914 915 if (cxt.cur_ops->readlock) { 916 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]), 917 GFP_KERNEL); 918 if (reader_tasks == NULL) { 919 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 920 firsterr = -ENOMEM; 921 goto unwind; 922 } 923 } 924 925 /* 926 * Create the kthreads and start torturing (oh, those poor little locks). 927 * 928 * TODO: Note that we interleave writers with readers, giving writers a 929 * slight advantage, by creating its kthread first. This can be modified 930 * for very specific needs, or even let the user choose the policy, if 931 * ever wanted. 932 */ 933 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 934 j < cxt.nrealreaders_stress; i++, j++) { 935 if (i >= cxt.nrealwriters_stress) 936 goto create_reader; 937 938 /* Create writer. */ 939 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 940 writer_tasks[i]); 941 if (firsterr) 942 goto unwind; 943 944 create_reader: 945 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 946 continue; 947 /* Create reader. */ 948 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 949 reader_tasks[j]); 950 if (firsterr) 951 goto unwind; 952 } 953 if (stat_interval > 0) { 954 firsterr = torture_create_kthread(lock_torture_stats, NULL, 955 stats_task); 956 if (firsterr) 957 goto unwind; 958 } 959 torture_init_end(); 960 return 0; 961 962 unwind: 963 torture_init_end(); 964 lock_torture_cleanup(); 965 return firsterr; 966 } 967 968 module_init(lock_torture_init); 969 module_exit(lock_torture_cleanup); 970