// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "

#define VERBOSE_SCFTORTOUT(s, x...) \
        do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0)

#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
              "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU wait operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU wait operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU wait operations.");

char *torture_type = "";

#ifdef MODULE
# define SCFTORT_SHUTDOWN 0
#else
# define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");
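
// Example invocation (hypothetical parameter values):
//
//      modprobe scftorture nthreads=4 stat_interval=15 weight_single=3 weight_many_wait=1
//
// When scftorture is built into the kernel, the same parameters are given on
// the boot command line with the "scftorture." prefix, for example
// scftorture.nthreads=4.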

struct scf_statistics {
        struct task_struct *task;
        int cpu;
        long long n_resched;
        long long n_single;
        long long n_single_ofl;
        long long n_single_rpc;
        long long n_single_rpc_ofl;
        long long n_single_wait;
        long long n_single_wait_ofl;
        long long n_many;
        long long n_many_wait;
        long long n_all;
        long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);

// Data for random primitive selection
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  //  except for SCF_PRIM_RESCHED and
				  //  SCF_PRIM_SINGLE_RPC.

static char *scf_prim_name[] = {
        "resched_cpu",
        "smp_call_function_single",
        "smp_call_function_single_rpc",
        "smp_call_function_many",
        "smp_call_function",
};

struct scf_selector {
        unsigned long scfs_weight;
        int scfs_prim;
        bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
        bool scfc_in;
        bool scfc_out;
        int scfc_cpu; // -1 for not _single().
        bool scfc_wait;
        bool scfc_rpc;
        struct completion scfc_completion;
};

// Used to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

extern void resched_cpu(int cpu); // An alternative IPI vector.

// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
        int cpu;
        int i;
        long long invoked_count = 0;
        bool isdone = READ_ONCE(scfdone);
        struct scf_statistics scfs = {};

        for_each_possible_cpu(cpu)
                invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
        for (i = 0; i < nthreads; i++) {
                scfs.n_resched += scf_stats_p[i].n_resched;
                scfs.n_single += scf_stats_p[i].n_single;
                scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
                scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
                scfs.n_single_rpc_ofl += scf_stats_p[i].n_single_rpc_ofl;
                scfs.n_single_wait += scf_stats_p[i].n_single_wait;
                scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
                scfs.n_many += scf_stats_p[i].n_many;
                scfs.n_many_wait += scf_stats_p[i].n_many_wait;
                scfs.n_all += scf_stats_p[i].n_all;
                scfs.n_all_wait += scf_stats_p[i].n_all_wait;
        }
        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
            atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
                bangstr = "!!! ";
        pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
                 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
                 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
                 scfs.n_single_rpc, scfs.n_single_rpc_ofl,
                 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
        torture_onoff_stats();
        // ste: CPU-mismatch errors, stnmie/stnmoe: handshake (memory-ordering)
        // errors detected on entry/exit, staf: scf_check allocation failures.
        pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
                atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
                atomic_read(&n_alloc_errs));
}

// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
        VERBOSE_TOROUT_STRING("scf_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                scf_torture_stats_print();
                torture_shutdown_absorb("scf_torture_stats");
        } while (!torture_must_stop());
        torture_kthread_stopping("scf_torture_stats");
        return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
        struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

        // If no weight, if array would overflow, if computing three-place
        // percentages would overflow, or if the scf_prim_name[] array would
        // overflow, don't bother.  In the last three cases, complain.
        if (!weight ||
            WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
            WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
            WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
                return;
        scf_sel_totweight += weight;
        scfsp->scfs_weight = scf_sel_totweight; // Cumulative weight simplifies scf_sel_rand().
        scfsp->scfs_prim = prim;
        scfsp->scfs_wait = wait;
        scf_sel_array_len++;
}

// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
        int i;
        unsigned long oldw = 0;
        struct scf_selector *scfsp;
        unsigned long w;

        for (i = 0; i < scf_sel_array_len; i++) {
                scfsp = &scf_sel_array[i];
                w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
                pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
                        scf_prim_name[scfsp->scfs_prim],
                        scfsp->scfs_wait ? "wait" : "nowait");
                oldw = scfsp->scfs_weight;
        }
}

// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
        int i;
        unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

        for (i = 0; i < scf_sel_array_len; i++)
                if (scf_sel_array[i].scfs_weight >= w)
                        return &scf_sel_array[i];
        WARN_ON_ONCE(1);
        return &scf_sel_array[0];
}

// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
        int i;
        int j;
        unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
        struct scf_check *scfcp = scfc_in;

        if (likely(scfcp)) {
                WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
                if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
                        atomic_inc(&n_mb_in_errs);
        }
        this_cpu_inc(scf_invoked_count);
        if (longwait <= 0) {
                if (!(r & 0xffc0)) {
                        udelay(r & 0x3f);
                        goto out;
                }
        }
        if (r & 0xfff)
                goto out;
        r = (r >> 12);
        if (longwait <= 0) {
                udelay((r & 0xff) + 1);
                goto out;
        }
        r = r % longwait + 1;
        for (i = 0; i < r; i++) {
                for (j = 0; j < 1000; j++) {
                        udelay(1000);
                        cpu_relax();
                }
        }
out:
        if (unlikely(!scfcp))
                return;
        if (scfcp->scfc_wait) {
                WRITE_ONCE(scfcp->scfc_out, true);
                if (scfcp->scfc_rpc)
                        complete(&scfcp->scfc_completion);
        } else {
                kfree(scfcp);
        }
}
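
// Handshake summary for the scf_check structure used above and below:
// the caller sets ->scfc_in just before sending the IPI, so a handler that
// still sees ->scfc_in as false counts a memory-ordering error in
// n_mb_in_errs.  For waited-on calls, the handler sets ->scfc_out (and
// completes ->scfc_completion for RPC calls); a caller that still sees
// ->scfc_out as false after its wait counts the error in n_mb_out_errs and
// deliberately leaks the structure.  No-wait handlers free it instead.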

// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
        struct scf_check *scfcp = scfc_in;

        if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu, "%s: Wanted CPU %d got CPU %d\n", __func__, scfcp->scfc_cpu, smp_processor_id())) {
                atomic_inc(&n_errs);
        }
        scf_handler(scfcp);
}

// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
        uintptr_t cpu;
        int ret = 0;
        struct scf_check *scfcp = NULL;
        struct scf_selector *scfsp = scf_sel_rand(trsp);

        if (use_cpus_read_lock)
                cpus_read_lock();
        else
                preempt_disable();
        if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
                scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
                if (WARN_ON_ONCE(!scfcp)) {
                        atomic_inc(&n_alloc_errs);
                } else {
                        scfcp->scfc_cpu = -1;
                        scfcp->scfc_wait = scfsp->scfs_wait;
                        scfcp->scfc_out = false;
                        scfcp->scfc_rpc = false;
                }
        }
        switch (scfsp->scfs_prim) {
        case SCF_PRIM_RESCHED:
                if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
                        cpu = torture_random(trsp) % nr_cpu_ids;
                        scfp->n_resched++;
                        resched_cpu(cpu);
                        this_cpu_inc(scf_invoked_count);
                }
                break;
        case SCF_PRIM_SINGLE:
                cpu = torture_random(trsp) % nr_cpu_ids;
                if (scfsp->scfs_wait)
                        scfp->n_single_wait++;
                else
                        scfp->n_single++;
                if (scfcp) {
                        scfcp->scfc_cpu = cpu;
                        barrier(); // Prevent race-reduction compiler optimizations.
                        scfcp->scfc_in = true;
                }
                ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
                if (ret) {
                        if (scfsp->scfs_wait)
                                scfp->n_single_wait_ofl++;
                        else
                                scfp->n_single_ofl++;
                        kfree(scfcp);
                        scfcp = NULL;
                }
                break;
        case SCF_PRIM_SINGLE_RPC:
                if (!scfcp)
                        break;
                cpu = torture_random(trsp) % nr_cpu_ids;
                scfp->n_single_rpc++;
                scfcp->scfc_cpu = cpu;
                scfcp->scfc_wait = true;
                init_completion(&scfcp->scfc_completion);
                scfcp->scfc_rpc = true;
                barrier(); // Prevent race-reduction compiler optimizations.
                scfcp->scfc_in = true;
                ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
                if (!ret) {
                        if (use_cpus_read_lock)
                                cpus_read_unlock();
                        else
                                preempt_enable();
                        wait_for_completion(&scfcp->scfc_completion);
                        if (use_cpus_read_lock)
                                cpus_read_lock();
                        else
                                preempt_disable();
                } else {
                        scfp->n_single_rpc_ofl++;
                        kfree(scfcp);
                        scfcp = NULL;
                }
                break;
        case SCF_PRIM_MANY:
                if (scfsp->scfs_wait)
                        scfp->n_many_wait++;
                else
                        scfp->n_many++;
                if (scfcp) {
                        barrier(); // Prevent race-reduction compiler optimizations.
                        scfcp->scfc_in = true;
                }
                smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
                break;
        case SCF_PRIM_ALL:
                if (scfsp->scfs_wait)
                        scfp->n_all_wait++;
                else
                        scfp->n_all++;
                if (scfcp) {
                        barrier(); // Prevent race-reduction compiler optimizations.
                        scfcp->scfc_in = true;
                }
                smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
                break;
        default:
                WARN_ON_ONCE(1);
                if (scfcp)
                        scfcp->scfc_out = true;
        }
        if (scfcp && scfsp->scfs_wait) {
                if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
                                 !scfcp->scfc_out)) {
                        pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
                        atomic_inc(&n_mb_out_errs); // Leak rather than trash!
                } else {
                        kfree(scfcp);
                }
                barrier(); // Prevent race-reduction compiler optimizations.
        }
        if (use_cpus_read_lock)
                cpus_read_unlock();
        else
                preempt_enable();
        if (!(torture_random(trsp) & 0xfff))
                schedule_timeout_uninterruptible(1);
}

// SCF test kthread.  Repeatedly does calls to members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
        int cpu;
        int curcpu;
        DEFINE_TORTURE_RANDOM(rand);
        struct scf_statistics *scfp = (struct scf_statistics *)arg;
        bool was_offline = false;

        VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
        cpu = scfp->cpu % nr_cpu_ids;
        WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
        set_user_nice(current, MAX_NICE);
        if (holdoff)
                schedule_timeout_interruptible(holdoff * HZ);

        VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());

        // Make sure that the CPU is affinitized appropriately during testing.
        curcpu = raw_smp_processor_id();
        WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
                  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
                  __func__, scfp->cpu, curcpu, nr_cpu_ids);

        if (!atomic_dec_return(&n_started))
                while (atomic_read_acquire(&n_started)) {
                        if (torture_must_stop()) {
                                VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
                                goto end;
                        }
                        schedule_timeout_uninterruptible(1);
                }

        VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

        do {
                scftorture_invoke_one(scfp, &rand);
                while (cpu_is_offline(cpu) && !torture_must_stop()) {
                        schedule_timeout_interruptible(HZ / 5);
                        was_offline = true;
                }
                if (was_offline) {
                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
                        was_offline = false;
                }
                cond_resched();
                stutter_wait("scftorture_invoker");
        } while (!torture_must_stop());

        VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
        torture_kthread_stopping("scftorture_invoker");
        return 0;
}

static void
scftorture_print_module_parms(const char *tag)
{
        pr_alert(SCFTORT_FLAG
                 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
                 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown_secs, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}

static void scf_cleanup_handler(void *unused)
{
}

static void scf_torture_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        WRITE_ONCE(scfdone, true);
        if (nthreads && scf_stats_p)
                for (i = 0; i < nthreads; i++)
                        torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
        else
                goto end;
        smp_call_function(scf_cleanup_handler, NULL, 0);
        torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
        scf_torture_stats_print();  // -After- the stats thread is stopped!
        kfree(scf_stats_p);         // -After- the last stats print has completed!
        scf_stats_p = NULL;

        if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
                scftorture_print_module_parms("End of test: FAILURE");
        else if (torture_onoff_failures())
                scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
        else
                scftorture_print_module_parms("End of test: SUCCESS");

end:
        torture_cleanup_end();
}
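
// Set up and start the test.  If no weight_* module parameter is positive,
// each weight left at its default of -1 gets a default value (2 * nr_cpu_ids
// for the resched and single-CPU primitives, 2 for the multi-CPU primitives,
// and 1 for the all-CPU primitives), while explicit zeros stay disabled.  If
// at least one weight is positive, any weight still at -1 is instead forced
// to zero.  For example (hypothetically assuming nr_cpu_ids == 8 and a
// built-in kernel with all weights defaulted), the cumulative total is
// 4 * 16 + 2 * 2 + 2 * 1 = 70, so scf_sel_dump() reports about 22.857% for
// each entry weighted 2 * nr_cpu_ids, 2.857% for each multi-CPU entry, and
// 1.428% for each all-CPU entry.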
static int __init scf_torture_init(void)
{
        long i;
        int firsterr = 0;
        unsigned long weight_resched1 = weight_resched;
        unsigned long weight_single1 = weight_single;
        unsigned long weight_single_rpc1 = weight_single_rpc;
        unsigned long weight_single_wait1 = weight_single_wait;
        unsigned long weight_many1 = weight_many;
        unsigned long weight_many_wait1 = weight_many_wait;
        unsigned long weight_all1 = weight_all;
        unsigned long weight_all_wait1 = weight_all_wait;

        if (!torture_init_begin(SCFTORT_STRING, verbose))
                return -EBUSY;

        scftorture_print_module_parms("Start of test");

        if (weight_resched <= 0 &&
            weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
            weight_many <= 0 && weight_many_wait <= 0 &&
            weight_all <= 0 && weight_all_wait <= 0) {
                weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
                weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
                weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
                weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
                weight_many1 = weight_many == 0 ? 0 : 2;
                weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
                weight_all1 = weight_all == 0 ? 0 : 1;
                weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
        } else {
                if (weight_resched == -1)
                        weight_resched1 = 0;
                if (weight_single == -1)
                        weight_single1 = 0;
                if (weight_single_rpc == -1)
                        weight_single_rpc1 = 0;
                if (weight_single_wait == -1)
                        weight_single_wait1 = 0;
                if (weight_many == -1)
                        weight_many1 = 0;
                if (weight_many_wait == -1)
                        weight_many_wait1 = 0;
                if (weight_all == -1)
                        weight_all1 = 0;
                if (weight_all_wait == -1)
                        weight_all_wait1 = 0;
        }
        if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
            weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
            weight_all1 == 0 && weight_all_wait1 == 0) {
                SCFTORTOUT_ERRSTRING("all-zero weights make no sense");
                firsterr = -EINVAL;
                goto unwind;
        }
        if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
                scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
        else if (weight_resched1)
                SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
        scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
        scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
        scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
        scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
        scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
        scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
        scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
        scf_sel_dump();

        if (onoff_interval > 0) {
                firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (shutdown_secs > 0) {
                firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (stutter > 0) {
                firsterr = torture_stutter_init(stutter, stutter);
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        // Worker tasks invoking smp_call_function().
        if (nthreads < 0)
                nthreads = num_online_cpus();
        scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
        if (!scf_stats_p) {
                SCFTORTOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }

        VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);

        atomic_set(&n_started, nthreads);
        for (i = 0; i < nthreads; i++) {
                scf_stats_p[i].cpu = i;
                firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
                                                  scf_stats_p[i].task);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        if (stat_interval > 0) {
                firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        scf_torture_cleanup();
        if (shutdown_secs) {
                WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
                kernel_power_off();
        }
        return firsterr;
}

module_init(scf_torture_init);
module_exit(scf_torture_cleanup);