1 /* 2 * Module-based torture test facility for locking 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, you can access it online at 16 * http://www.gnu.org/licenses/gpl-2.0.html. 17 * 18 * Copyright (C) IBM Corporation, 2014 19 * 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 21 * Davidlohr Bueso <dave@stgolabs.net> 22 * Based on kernel/rcu/torture.c. 23 */ 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/kthread.h> 27 #include <linux/sched/rt.h> 28 #include <linux/spinlock.h> 29 #include <linux/rwlock.h> 30 #include <linux/mutex.h> 31 #include <linux/rwsem.h> 32 #include <linux/smp.h> 33 #include <linux/interrupt.h> 34 #include <linux/sched.h> 35 #include <linux/atomic.h> 36 #include <linux/moduleparam.h> 37 #include <linux/delay.h> 38 #include <linux/slab.h> 39 #include <linux/percpu-rwsem.h> 40 #include <linux/torture.h> 41 42 MODULE_LICENSE("GPL"); 43 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>"); 44 45 torture_param(int, nwriters_stress, -1, 46 "Number of write-locking stress-test threads"); 47 torture_param(int, nreaders_stress, -1, 48 "Number of read-locking stress-test threads"); 49 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); 50 torture_param(int, onoff_interval, 0, 51 "Time between CPU hotplugs (s), 0=disable"); 52 torture_param(int, shuffle_interval, 3, 53 "Number of jiffies between shuffles, 0=disable"); 54 torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable."); 55 torture_param(int, stat_interval, 60, 56 "Number of seconds between stats printk()s"); 57 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable"); 58 torture_param(bool, verbose, true, 59 "Enable verbose debugging printk()s"); 60 61 static char *torture_type = "spin_lock"; 62 module_param(torture_type, charp, 0444); 63 MODULE_PARM_DESC(torture_type, 64 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)"); 65 66 static struct task_struct *stats_task; 67 static struct task_struct **writer_tasks; 68 static struct task_struct **reader_tasks; 69 70 static bool lock_is_write_held; 71 static bool lock_is_read_held; 72 73 struct lock_stress_stats { 74 long n_lock_fail; 75 long n_lock_acquired; 76 }; 77 78 int torture_runnable = IS_ENABLED(MODULE); 79 module_param(torture_runnable, int, 0444); 80 MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init"); 81 82 /* Forward reference. */ 83 static void lock_torture_cleanup(void); 84 85 /* 86 * Operations vector for selecting different types of tests. 87 */ 88 struct lock_torture_ops { 89 void (*init)(void); 90 int (*writelock)(void); 91 void (*write_delay)(struct torture_random_state *trsp); 92 void (*task_boost)(struct torture_random_state *trsp); 93 void (*writeunlock)(void); 94 int (*readlock)(void); 95 void (*read_delay)(struct torture_random_state *trsp); 96 void (*readunlock)(void); 97 98 unsigned long flags; /* for irq spinlocks */ 99 const char *name; 100 }; 101 102 struct lock_torture_cxt { 103 int nrealwriters_stress; 104 int nrealreaders_stress; 105 bool debug_lock; 106 atomic_t n_lock_torture_errors; 107 struct lock_torture_ops *cur_ops; 108 struct lock_stress_stats *lwsa; /* writer statistics */ 109 struct lock_stress_stats *lrsa; /* reader statistics */ 110 }; 111 static struct lock_torture_cxt cxt = { 0, 0, false, 112 ATOMIC_INIT(0), 113 NULL, NULL}; 114 /* 115 * Definitions for lock torture testing. 116 */ 117 118 static int torture_lock_busted_write_lock(void) 119 { 120 return 0; /* BUGGY, do not use in real life!!! */ 121 } 122 123 static void torture_lock_busted_write_delay(struct torture_random_state *trsp) 124 { 125 const unsigned long longdelay_ms = 100; 126 127 /* We want a long delay occasionally to force massive contention. */ 128 if (!(torture_random(trsp) % 129 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 130 mdelay(longdelay_ms); 131 #ifdef CONFIG_PREEMPT 132 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 133 preempt_schedule(); /* Allow test to be preempted. */ 134 #endif 135 } 136 137 static void torture_lock_busted_write_unlock(void) 138 { 139 /* BUGGY, do not use in real life!!! */ 140 } 141 142 static void torture_boost_dummy(struct torture_random_state *trsp) 143 { 144 /* Only rtmutexes care about priority */ 145 } 146 147 static struct lock_torture_ops lock_busted_ops = { 148 .writelock = torture_lock_busted_write_lock, 149 .write_delay = torture_lock_busted_write_delay, 150 .task_boost = torture_boost_dummy, 151 .writeunlock = torture_lock_busted_write_unlock, 152 .readlock = NULL, 153 .read_delay = NULL, 154 .readunlock = NULL, 155 .name = "lock_busted" 156 }; 157 158 static DEFINE_SPINLOCK(torture_spinlock); 159 160 static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock) 161 { 162 spin_lock(&torture_spinlock); 163 return 0; 164 } 165 166 static void torture_spin_lock_write_delay(struct torture_random_state *trsp) 167 { 168 const unsigned long shortdelay_us = 2; 169 const unsigned long longdelay_ms = 100; 170 171 /* We want a short delay mostly to emulate likely code, and 172 * we want a long delay occasionally to force massive contention. 173 */ 174 if (!(torture_random(trsp) % 175 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 176 mdelay(longdelay_ms); 177 if (!(torture_random(trsp) % 178 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 179 udelay(shortdelay_us); 180 #ifdef CONFIG_PREEMPT 181 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 182 preempt_schedule(); /* Allow test to be preempted. */ 183 #endif 184 } 185 186 static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock) 187 { 188 spin_unlock(&torture_spinlock); 189 } 190 191 static struct lock_torture_ops spin_lock_ops = { 192 .writelock = torture_spin_lock_write_lock, 193 .write_delay = torture_spin_lock_write_delay, 194 .task_boost = torture_boost_dummy, 195 .writeunlock = torture_spin_lock_write_unlock, 196 .readlock = NULL, 197 .read_delay = NULL, 198 .readunlock = NULL, 199 .name = "spin_lock" 200 }; 201 202 static int torture_spin_lock_write_lock_irq(void) 203 __acquires(torture_spinlock) 204 { 205 unsigned long flags; 206 207 spin_lock_irqsave(&torture_spinlock, flags); 208 cxt.cur_ops->flags = flags; 209 return 0; 210 } 211 212 static void torture_lock_spin_write_unlock_irq(void) 213 __releases(torture_spinlock) 214 { 215 spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags); 216 } 217 218 static struct lock_torture_ops spin_lock_irq_ops = { 219 .writelock = torture_spin_lock_write_lock_irq, 220 .write_delay = torture_spin_lock_write_delay, 221 .task_boost = torture_boost_dummy, 222 .writeunlock = torture_lock_spin_write_unlock_irq, 223 .readlock = NULL, 224 .read_delay = NULL, 225 .readunlock = NULL, 226 .name = "spin_lock_irq" 227 }; 228 229 static DEFINE_RWLOCK(torture_rwlock); 230 231 static int torture_rwlock_write_lock(void) __acquires(torture_rwlock) 232 { 233 write_lock(&torture_rwlock); 234 return 0; 235 } 236 237 static void torture_rwlock_write_delay(struct torture_random_state *trsp) 238 { 239 const unsigned long shortdelay_us = 2; 240 const unsigned long longdelay_ms = 100; 241 242 /* We want a short delay mostly to emulate likely code, and 243 * we want a long delay occasionally to force massive contention. 244 */ 245 if (!(torture_random(trsp) % 246 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 247 mdelay(longdelay_ms); 248 else 249 udelay(shortdelay_us); 250 } 251 252 static void torture_rwlock_write_unlock(void) __releases(torture_rwlock) 253 { 254 write_unlock(&torture_rwlock); 255 } 256 257 static int torture_rwlock_read_lock(void) __acquires(torture_rwlock) 258 { 259 read_lock(&torture_rwlock); 260 return 0; 261 } 262 263 static void torture_rwlock_read_delay(struct torture_random_state *trsp) 264 { 265 const unsigned long shortdelay_us = 10; 266 const unsigned long longdelay_ms = 100; 267 268 /* We want a short delay mostly to emulate likely code, and 269 * we want a long delay occasionally to force massive contention. 270 */ 271 if (!(torture_random(trsp) % 272 (cxt.nrealreaders_stress * 2000 * longdelay_ms))) 273 mdelay(longdelay_ms); 274 else 275 udelay(shortdelay_us); 276 } 277 278 static void torture_rwlock_read_unlock(void) __releases(torture_rwlock) 279 { 280 read_unlock(&torture_rwlock); 281 } 282 283 static struct lock_torture_ops rw_lock_ops = { 284 .writelock = torture_rwlock_write_lock, 285 .write_delay = torture_rwlock_write_delay, 286 .task_boost = torture_boost_dummy, 287 .writeunlock = torture_rwlock_write_unlock, 288 .readlock = torture_rwlock_read_lock, 289 .read_delay = torture_rwlock_read_delay, 290 .readunlock = torture_rwlock_read_unlock, 291 .name = "rw_lock" 292 }; 293 294 static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock) 295 { 296 unsigned long flags; 297 298 write_lock_irqsave(&torture_rwlock, flags); 299 cxt.cur_ops->flags = flags; 300 return 0; 301 } 302 303 static void torture_rwlock_write_unlock_irq(void) 304 __releases(torture_rwlock) 305 { 306 write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 307 } 308 309 static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock) 310 { 311 unsigned long flags; 312 313 read_lock_irqsave(&torture_rwlock, flags); 314 cxt.cur_ops->flags = flags; 315 return 0; 316 } 317 318 static void torture_rwlock_read_unlock_irq(void) 319 __releases(torture_rwlock) 320 { 321 read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags); 322 } 323 324 static struct lock_torture_ops rw_lock_irq_ops = { 325 .writelock = torture_rwlock_write_lock_irq, 326 .write_delay = torture_rwlock_write_delay, 327 .task_boost = torture_boost_dummy, 328 .writeunlock = torture_rwlock_write_unlock_irq, 329 .readlock = torture_rwlock_read_lock_irq, 330 .read_delay = torture_rwlock_read_delay, 331 .readunlock = torture_rwlock_read_unlock_irq, 332 .name = "rw_lock_irq" 333 }; 334 335 static DEFINE_MUTEX(torture_mutex); 336 337 static int torture_mutex_lock(void) __acquires(torture_mutex) 338 { 339 mutex_lock(&torture_mutex); 340 return 0; 341 } 342 343 static void torture_mutex_delay(struct torture_random_state *trsp) 344 { 345 const unsigned long longdelay_ms = 100; 346 347 /* We want a long delay occasionally to force massive contention. */ 348 if (!(torture_random(trsp) % 349 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 350 mdelay(longdelay_ms * 5); 351 else 352 mdelay(longdelay_ms / 5); 353 #ifdef CONFIG_PREEMPT 354 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 355 preempt_schedule(); /* Allow test to be preempted. */ 356 #endif 357 } 358 359 static void torture_mutex_unlock(void) __releases(torture_mutex) 360 { 361 mutex_unlock(&torture_mutex); 362 } 363 364 static struct lock_torture_ops mutex_lock_ops = { 365 .writelock = torture_mutex_lock, 366 .write_delay = torture_mutex_delay, 367 .task_boost = torture_boost_dummy, 368 .writeunlock = torture_mutex_unlock, 369 .readlock = NULL, 370 .read_delay = NULL, 371 .readunlock = NULL, 372 .name = "mutex_lock" 373 }; 374 375 #ifdef CONFIG_RT_MUTEXES 376 static DEFINE_RT_MUTEX(torture_rtmutex); 377 378 static int torture_rtmutex_lock(void) __acquires(torture_rtmutex) 379 { 380 rt_mutex_lock(&torture_rtmutex); 381 return 0; 382 } 383 384 static void torture_rtmutex_boost(struct torture_random_state *trsp) 385 { 386 int policy; 387 struct sched_param param; 388 const unsigned int factor = 50000; /* yes, quite arbitrary */ 389 390 if (!rt_task(current)) { 391 /* 392 * Boost priority once every ~50k operations. When the 393 * task tries to take the lock, the rtmutex it will account 394 * for the new priority, and do any corresponding pi-dance. 395 */ 396 if (trsp && !(torture_random(trsp) % 397 (cxt.nrealwriters_stress * factor))) { 398 policy = SCHED_FIFO; 399 param.sched_priority = MAX_RT_PRIO - 1; 400 } else /* common case, do nothing */ 401 return; 402 } else { 403 /* 404 * The task will remain boosted for another ~500k operations, 405 * then restored back to its original prio, and so forth. 406 * 407 * When @trsp is nil, we want to force-reset the task for 408 * stopping the kthread. 409 */ 410 if (!trsp || !(torture_random(trsp) % 411 (cxt.nrealwriters_stress * factor * 2))) { 412 policy = SCHED_NORMAL; 413 param.sched_priority = 0; 414 } else /* common case, do nothing */ 415 return; 416 } 417 418 sched_setscheduler_nocheck(current, policy, ¶m); 419 } 420 421 static void torture_rtmutex_delay(struct torture_random_state *trsp) 422 { 423 const unsigned long shortdelay_us = 2; 424 const unsigned long longdelay_ms = 100; 425 426 /* 427 * We want a short delay mostly to emulate likely code, and 428 * we want a long delay occasionally to force massive contention. 429 */ 430 if (!(torture_random(trsp) % 431 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 432 mdelay(longdelay_ms); 433 if (!(torture_random(trsp) % 434 (cxt.nrealwriters_stress * 2 * shortdelay_us))) 435 udelay(shortdelay_us); 436 #ifdef CONFIG_PREEMPT 437 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 438 preempt_schedule(); /* Allow test to be preempted. */ 439 #endif 440 } 441 442 static void torture_rtmutex_unlock(void) __releases(torture_rtmutex) 443 { 444 rt_mutex_unlock(&torture_rtmutex); 445 } 446 447 static struct lock_torture_ops rtmutex_lock_ops = { 448 .writelock = torture_rtmutex_lock, 449 .write_delay = torture_rtmutex_delay, 450 .task_boost = torture_rtmutex_boost, 451 .writeunlock = torture_rtmutex_unlock, 452 .readlock = NULL, 453 .read_delay = NULL, 454 .readunlock = NULL, 455 .name = "rtmutex_lock" 456 }; 457 #endif 458 459 static DECLARE_RWSEM(torture_rwsem); 460 static int torture_rwsem_down_write(void) __acquires(torture_rwsem) 461 { 462 down_write(&torture_rwsem); 463 return 0; 464 } 465 466 static void torture_rwsem_write_delay(struct torture_random_state *trsp) 467 { 468 const unsigned long longdelay_ms = 100; 469 470 /* We want a long delay occasionally to force massive contention. */ 471 if (!(torture_random(trsp) % 472 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 473 mdelay(longdelay_ms * 10); 474 else 475 mdelay(longdelay_ms / 10); 476 #ifdef CONFIG_PREEMPT 477 if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000))) 478 preempt_schedule(); /* Allow test to be preempted. */ 479 #endif 480 } 481 482 static void torture_rwsem_up_write(void) __releases(torture_rwsem) 483 { 484 up_write(&torture_rwsem); 485 } 486 487 static int torture_rwsem_down_read(void) __acquires(torture_rwsem) 488 { 489 down_read(&torture_rwsem); 490 return 0; 491 } 492 493 static void torture_rwsem_read_delay(struct torture_random_state *trsp) 494 { 495 const unsigned long longdelay_ms = 100; 496 497 /* We want a long delay occasionally to force massive contention. */ 498 if (!(torture_random(trsp) % 499 (cxt.nrealwriters_stress * 2000 * longdelay_ms))) 500 mdelay(longdelay_ms * 2); 501 else 502 mdelay(longdelay_ms / 2); 503 #ifdef CONFIG_PREEMPT 504 if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000))) 505 preempt_schedule(); /* Allow test to be preempted. */ 506 #endif 507 } 508 509 static void torture_rwsem_up_read(void) __releases(torture_rwsem) 510 { 511 up_read(&torture_rwsem); 512 } 513 514 static struct lock_torture_ops rwsem_lock_ops = { 515 .writelock = torture_rwsem_down_write, 516 .write_delay = torture_rwsem_write_delay, 517 .task_boost = torture_boost_dummy, 518 .writeunlock = torture_rwsem_up_write, 519 .readlock = torture_rwsem_down_read, 520 .read_delay = torture_rwsem_read_delay, 521 .readunlock = torture_rwsem_up_read, 522 .name = "rwsem_lock" 523 }; 524 525 #include <linux/percpu-rwsem.h> 526 static struct percpu_rw_semaphore pcpu_rwsem; 527 528 void torture_percpu_rwsem_init(void) 529 { 530 BUG_ON(percpu_init_rwsem(&pcpu_rwsem)); 531 } 532 533 static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem) 534 { 535 percpu_down_write(&pcpu_rwsem); 536 return 0; 537 } 538 539 static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem) 540 { 541 percpu_up_write(&pcpu_rwsem); 542 } 543 544 static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem) 545 { 546 percpu_down_read(&pcpu_rwsem); 547 return 0; 548 } 549 550 static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem) 551 { 552 percpu_up_read(&pcpu_rwsem); 553 } 554 555 static struct lock_torture_ops percpu_rwsem_lock_ops = { 556 .init = torture_percpu_rwsem_init, 557 .writelock = torture_percpu_rwsem_down_write, 558 .write_delay = torture_rwsem_write_delay, 559 .task_boost = torture_boost_dummy, 560 .writeunlock = torture_percpu_rwsem_up_write, 561 .readlock = torture_percpu_rwsem_down_read, 562 .read_delay = torture_rwsem_read_delay, 563 .readunlock = torture_percpu_rwsem_up_read, 564 .name = "percpu_rwsem_lock" 565 }; 566 567 /* 568 * Lock torture writer kthread. Repeatedly acquires and releases 569 * the lock, checking for duplicate acquisitions. 570 */ 571 static int lock_torture_writer(void *arg) 572 { 573 struct lock_stress_stats *lwsp = arg; 574 static DEFINE_TORTURE_RANDOM(rand); 575 576 VERBOSE_TOROUT_STRING("lock_torture_writer task started"); 577 set_user_nice(current, MAX_NICE); 578 579 do { 580 if ((torture_random(&rand) & 0xfffff) == 0) 581 schedule_timeout_uninterruptible(1); 582 583 cxt.cur_ops->task_boost(&rand); 584 cxt.cur_ops->writelock(); 585 if (WARN_ON_ONCE(lock_is_write_held)) 586 lwsp->n_lock_fail++; 587 lock_is_write_held = 1; 588 if (WARN_ON_ONCE(lock_is_read_held)) 589 lwsp->n_lock_fail++; /* rare, but... */ 590 591 lwsp->n_lock_acquired++; 592 cxt.cur_ops->write_delay(&rand); 593 lock_is_write_held = 0; 594 cxt.cur_ops->writeunlock(); 595 596 stutter_wait("lock_torture_writer"); 597 } while (!torture_must_stop()); 598 599 cxt.cur_ops->task_boost(NULL); /* reset prio */ 600 torture_kthread_stopping("lock_torture_writer"); 601 return 0; 602 } 603 604 /* 605 * Lock torture reader kthread. Repeatedly acquires and releases 606 * the reader lock. 607 */ 608 static int lock_torture_reader(void *arg) 609 { 610 struct lock_stress_stats *lrsp = arg; 611 static DEFINE_TORTURE_RANDOM(rand); 612 613 VERBOSE_TOROUT_STRING("lock_torture_reader task started"); 614 set_user_nice(current, MAX_NICE); 615 616 do { 617 if ((torture_random(&rand) & 0xfffff) == 0) 618 schedule_timeout_uninterruptible(1); 619 620 cxt.cur_ops->readlock(); 621 lock_is_read_held = 1; 622 if (WARN_ON_ONCE(lock_is_write_held)) 623 lrsp->n_lock_fail++; /* rare, but... */ 624 625 lrsp->n_lock_acquired++; 626 cxt.cur_ops->read_delay(&rand); 627 lock_is_read_held = 0; 628 cxt.cur_ops->readunlock(); 629 630 stutter_wait("lock_torture_reader"); 631 } while (!torture_must_stop()); 632 torture_kthread_stopping("lock_torture_reader"); 633 return 0; 634 } 635 636 /* 637 * Create an lock-torture-statistics message in the specified buffer. 638 */ 639 static void __torture_print_stats(char *page, 640 struct lock_stress_stats *statp, bool write) 641 { 642 bool fail = 0; 643 int i, n_stress; 644 long max = 0; 645 long min = statp[0].n_lock_acquired; 646 long long sum = 0; 647 648 n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress; 649 for (i = 0; i < n_stress; i++) { 650 if (statp[i].n_lock_fail) 651 fail = true; 652 sum += statp[i].n_lock_acquired; 653 if (max < statp[i].n_lock_fail) 654 max = statp[i].n_lock_fail; 655 if (min > statp[i].n_lock_fail) 656 min = statp[i].n_lock_fail; 657 } 658 page += sprintf(page, 659 "%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n", 660 write ? "Writes" : "Reads ", 661 sum, max, min, max / 2 > min ? "???" : "", 662 fail, fail ? "!!!" : ""); 663 if (fail) 664 atomic_inc(&cxt.n_lock_torture_errors); 665 } 666 667 /* 668 * Print torture statistics. Caller must ensure that there is only one 669 * call to this function at a given time!!! This is normally accomplished 670 * by relying on the module system to only have one copy of the module 671 * loaded, and then by giving the lock_torture_stats kthread full control 672 * (or the init/cleanup functions when lock_torture_stats thread is not 673 * running). 674 */ 675 static void lock_torture_stats_print(void) 676 { 677 int size = cxt.nrealwriters_stress * 200 + 8192; 678 char *buf; 679 680 if (cxt.cur_ops->readlock) 681 size += cxt.nrealreaders_stress * 200 + 8192; 682 683 buf = kmalloc(size, GFP_KERNEL); 684 if (!buf) { 685 pr_err("lock_torture_stats_print: Out of memory, need: %d", 686 size); 687 return; 688 } 689 690 __torture_print_stats(buf, cxt.lwsa, true); 691 pr_alert("%s", buf); 692 kfree(buf); 693 694 if (cxt.cur_ops->readlock) { 695 buf = kmalloc(size, GFP_KERNEL); 696 if (!buf) { 697 pr_err("lock_torture_stats_print: Out of memory, need: %d", 698 size); 699 return; 700 } 701 702 __torture_print_stats(buf, cxt.lrsa, false); 703 pr_alert("%s", buf); 704 kfree(buf); 705 } 706 } 707 708 /* 709 * Periodically prints torture statistics, if periodic statistics printing 710 * was specified via the stat_interval module parameter. 711 * 712 * No need to worry about fullstop here, since this one doesn't reference 713 * volatile state or register callbacks. 714 */ 715 static int lock_torture_stats(void *arg) 716 { 717 VERBOSE_TOROUT_STRING("lock_torture_stats task started"); 718 do { 719 schedule_timeout_interruptible(stat_interval * HZ); 720 lock_torture_stats_print(); 721 torture_shutdown_absorb("lock_torture_stats"); 722 } while (!torture_must_stop()); 723 torture_kthread_stopping("lock_torture_stats"); 724 return 0; 725 } 726 727 static inline void 728 lock_torture_print_module_parms(struct lock_torture_ops *cur_ops, 729 const char *tag) 730 { 731 pr_alert("%s" TORTURE_FLAG 732 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n", 733 torture_type, tag, cxt.debug_lock ? " [debug]": "", 734 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval, 735 verbose, shuffle_interval, stutter, shutdown_secs, 736 onoff_interval, onoff_holdoff); 737 } 738 739 static void lock_torture_cleanup(void) 740 { 741 int i; 742 743 if (torture_cleanup_begin()) 744 return; 745 746 /* 747 * Indicates early cleanup, meaning that the test has not run, 748 * such as when passing bogus args when loading the module. As 749 * such, only perform the underlying torture-specific cleanups, 750 * and avoid anything related to locktorture. 751 */ 752 if (!cxt.lwsa) 753 goto end; 754 755 if (writer_tasks) { 756 for (i = 0; i < cxt.nrealwriters_stress; i++) 757 torture_stop_kthread(lock_torture_writer, 758 writer_tasks[i]); 759 kfree(writer_tasks); 760 writer_tasks = NULL; 761 } 762 763 if (reader_tasks) { 764 for (i = 0; i < cxt.nrealreaders_stress; i++) 765 torture_stop_kthread(lock_torture_reader, 766 reader_tasks[i]); 767 kfree(reader_tasks); 768 reader_tasks = NULL; 769 } 770 771 torture_stop_kthread(lock_torture_stats, stats_task); 772 lock_torture_stats_print(); /* -After- the stats thread is stopped! */ 773 774 if (atomic_read(&cxt.n_lock_torture_errors)) 775 lock_torture_print_module_parms(cxt.cur_ops, 776 "End of test: FAILURE"); 777 else if (torture_onoff_failures()) 778 lock_torture_print_module_parms(cxt.cur_ops, 779 "End of test: LOCK_HOTPLUG"); 780 else 781 lock_torture_print_module_parms(cxt.cur_ops, 782 "End of test: SUCCESS"); 783 end: 784 torture_cleanup_end(); 785 } 786 787 static int __init lock_torture_init(void) 788 { 789 int i, j; 790 int firsterr = 0; 791 static struct lock_torture_ops *torture_ops[] = { 792 &lock_busted_ops, 793 &spin_lock_ops, &spin_lock_irq_ops, 794 &rw_lock_ops, &rw_lock_irq_ops, 795 &mutex_lock_ops, 796 #ifdef CONFIG_RT_MUTEXES 797 &rtmutex_lock_ops, 798 #endif 799 &rwsem_lock_ops, 800 &percpu_rwsem_lock_ops, 801 }; 802 803 if (!torture_init_begin(torture_type, verbose, &torture_runnable)) 804 return -EBUSY; 805 806 /* Process args and tell the world that the torturer is on the job. */ 807 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 808 cxt.cur_ops = torture_ops[i]; 809 if (strcmp(torture_type, cxt.cur_ops->name) == 0) 810 break; 811 } 812 if (i == ARRAY_SIZE(torture_ops)) { 813 pr_alert("lock-torture: invalid torture type: \"%s\"\n", 814 torture_type); 815 pr_alert("lock-torture types:"); 816 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) 817 pr_alert(" %s", torture_ops[i]->name); 818 pr_alert("\n"); 819 firsterr = -EINVAL; 820 goto unwind; 821 } 822 if (cxt.cur_ops->init) 823 cxt.cur_ops->init(); 824 825 if (nwriters_stress >= 0) 826 cxt.nrealwriters_stress = nwriters_stress; 827 else 828 cxt.nrealwriters_stress = 2 * num_online_cpus(); 829 830 #ifdef CONFIG_DEBUG_MUTEXES 831 if (strncmp(torture_type, "mutex", 5) == 0) 832 cxt.debug_lock = true; 833 #endif 834 #ifdef CONFIG_DEBUG_RT_MUTEXES 835 if (strncmp(torture_type, "rtmutex", 7) == 0) 836 cxt.debug_lock = true; 837 #endif 838 #ifdef CONFIG_DEBUG_SPINLOCK 839 if ((strncmp(torture_type, "spin", 4) == 0) || 840 (strncmp(torture_type, "rw_lock", 7) == 0)) 841 cxt.debug_lock = true; 842 #endif 843 844 /* Initialize the statistics so that each run gets its own numbers. */ 845 846 lock_is_write_held = 0; 847 cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL); 848 if (cxt.lwsa == NULL) { 849 VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory"); 850 firsterr = -ENOMEM; 851 goto unwind; 852 } 853 for (i = 0; i < cxt.nrealwriters_stress; i++) { 854 cxt.lwsa[i].n_lock_fail = 0; 855 cxt.lwsa[i].n_lock_acquired = 0; 856 } 857 858 if (cxt.cur_ops->readlock) { 859 if (nreaders_stress >= 0) 860 cxt.nrealreaders_stress = nreaders_stress; 861 else { 862 /* 863 * By default distribute evenly the number of 864 * readers and writers. We still run the same number 865 * of threads as the writer-only locks default. 866 */ 867 if (nwriters_stress < 0) /* user doesn't care */ 868 cxt.nrealwriters_stress = num_online_cpus(); 869 cxt.nrealreaders_stress = cxt.nrealwriters_stress; 870 } 871 872 lock_is_read_held = 0; 873 cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL); 874 if (cxt.lrsa == NULL) { 875 VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory"); 876 firsterr = -ENOMEM; 877 kfree(cxt.lwsa); 878 cxt.lwsa = NULL; 879 goto unwind; 880 } 881 882 for (i = 0; i < cxt.nrealreaders_stress; i++) { 883 cxt.lrsa[i].n_lock_fail = 0; 884 cxt.lrsa[i].n_lock_acquired = 0; 885 } 886 } 887 888 lock_torture_print_module_parms(cxt.cur_ops, "Start of test"); 889 890 /* Prepare torture context. */ 891 if (onoff_interval > 0) { 892 firsterr = torture_onoff_init(onoff_holdoff * HZ, 893 onoff_interval * HZ); 894 if (firsterr) 895 goto unwind; 896 } 897 if (shuffle_interval > 0) { 898 firsterr = torture_shuffle_init(shuffle_interval); 899 if (firsterr) 900 goto unwind; 901 } 902 if (shutdown_secs > 0) { 903 firsterr = torture_shutdown_init(shutdown_secs, 904 lock_torture_cleanup); 905 if (firsterr) 906 goto unwind; 907 } 908 if (stutter > 0) { 909 firsterr = torture_stutter_init(stutter); 910 if (firsterr) 911 goto unwind; 912 } 913 914 writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]), 915 GFP_KERNEL); 916 if (writer_tasks == NULL) { 917 VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory"); 918 firsterr = -ENOMEM; 919 goto unwind; 920 } 921 922 if (cxt.cur_ops->readlock) { 923 reader_tasks = kzalloc(cxt.nrealreaders_stress * sizeof(reader_tasks[0]), 924 GFP_KERNEL); 925 if (reader_tasks == NULL) { 926 VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory"); 927 firsterr = -ENOMEM; 928 goto unwind; 929 } 930 } 931 932 /* 933 * Create the kthreads and start torturing (oh, those poor little locks). 934 * 935 * TODO: Note that we interleave writers with readers, giving writers a 936 * slight advantage, by creating its kthread first. This can be modified 937 * for very specific needs, or even let the user choose the policy, if 938 * ever wanted. 939 */ 940 for (i = 0, j = 0; i < cxt.nrealwriters_stress || 941 j < cxt.nrealreaders_stress; i++, j++) { 942 if (i >= cxt.nrealwriters_stress) 943 goto create_reader; 944 945 /* Create writer. */ 946 firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i], 947 writer_tasks[i]); 948 if (firsterr) 949 goto unwind; 950 951 create_reader: 952 if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress)) 953 continue; 954 /* Create reader. */ 955 firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j], 956 reader_tasks[j]); 957 if (firsterr) 958 goto unwind; 959 } 960 if (stat_interval > 0) { 961 firsterr = torture_create_kthread(lock_torture_stats, NULL, 962 stats_task); 963 if (firsterr) 964 goto unwind; 965 } 966 torture_init_end(); 967 return 0; 968 969 unwind: 970 torture_init_end(); 971 lock_torture_cleanup(); 972 return firsterr; 973 } 974 975 module_init(lock_torture_init); 976 module_exit(lock_torture_cleanup); 977