// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
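/*
 * A quick reference for the SIGP condition codes the helpers below act on
 * (see the SIGP_CC_* definitions in <asm/sigp.h>):
 *
 *	0  SIGP_CC_ORDER_CODE_ACCEPTED	order accepted by the target cpu
 *	1  SIGP_CC_STATUS_STORED	status bits stored at the parameter
 *	2  SIGP_CC_BUSY			access path busy, order not started
 *	3  SIGP_CC_NOT_OPERATIONAL	target cpu not operational
 *
 * Both retry helpers spin only on cc 2 (busy) and hand any other
 * condition code back to the caller.
 */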
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
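/*
 * Rough sketch of the ec bit signalling flow implemented above, assuming
 * a sender on cpu A and a target pcpu on cpu B:
 *
 *	test_and_set_bit(ec_bit, &pcpu->ec_mask)   A: publish the request
 *	sigp external call / emergency signal	   A -> B: kick the target
 *	bits = xchg(&pcpu->ec_mask, 0)		   B: consume all requests
 *
 * pcpu_ec_call() uses an external call when the target is running and an
 * emergency signal otherwise; the test_and_set_bit() lets it skip the
 * sigp entirely if the same request is already pending on the target.
 */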
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	nmi_free_per_cpu(lc);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						pcpu_delegate_fn *func,
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
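/*
 * Worked example for the mtid -> smp_cpu_mt_shift computation below
 * (values are illustrative): with mtid = 1 (two threads per core) the
 * loop yields smp_cpu_mt_shift = 1, with mtid = 3 (four threads) it
 * yields smp_cpu_mt_shift = 2, i.e. the smallest shift for which
 * (1U << shift) > mtid. A core id is then converted to the physical
 * address of its first thread by address = core_id << smp_cpu_mt_shift.
 */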
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);
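/*
 * Note on the timeout above: the TOD clock increments bit 51 every
 * microsecond, so a delay of n microseconds corresponds to n << 12 TOD
 * clock units. The (1000000UL << 12) offset therefore gives the signalled
 * cpus roughly one second to stop before smp_emergency_stop() gives up
 * waiting.
 */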
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		__s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
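/*
 * Both flavours of the signalling external interrupt are routed to
 * do_ext_call_interrupt() above: ext_code.code 0x1202 is an external
 * call (counted as IRQEXT_EXC), 0x1201 an emergency signal (counted as
 * IRQEXT_EMS); see the registration in smp_prepare_cpus(). The handler
 * does not care which order delivered the interrupt, it simply consumes
 * the whole ec_mask of the current cpu.
 */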
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = pcpu_devices + cpu;
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}
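/*
 * A note on the address composition in smp_store_status() above: the
 * additional-status save area origin is taken from lc->mcesad (masked
 * with MCESA_ORIGIN_MASK); with the guarded storage facility the length
 * characteristic bits (MCESA_LC_MASK) are merged into the sigp parameter
 * as well, so the machine knows which save area format to store.
 */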
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}
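/*
 * The two helpers above use different sigp orders, reflecting the usual
 * split of the architected save areas: SIGP_STORE_STATUS_AT_ADDRESS
 * stores the 512 byte register save area (general purpose, floating
 * point and control registers etc.), while SIGP_STORE_ADDITIONAL_STATUS
 * stores the vector registers. The boot cpu is special in both cases,
 * since its registers were already saved by the previous system, the
 * boot-loader or the firmware.
 */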
void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_dma_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}
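/*
 * Worked example for smp_add_core() with hypothetical numbers: with
 * smp_cpu_mt_shift = 1 (two threads per core) a core entry with
 * core_id = 5 covers the physical addresses 10 and 11, which get bound
 * to the next free logical cpu numbers from the avail mask. The IPL
 * core is added first by __smp_rescan_cpus() below, so its threads end
 * up on the lowest logical cpu numbers.
 */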
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free_early((unsigned long)info, sizeof(*info));
}

static void smp_init_secondary(void)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcu_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	update_cpu_masks();
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
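/*
 * Sketch of the bring-up sequence for a secondary cpu, as implemented
 * by the code below:
 *
 *	__cpu_up() -> pcpu_start_fn() -> sigp restart
 *	  -> smp_start_secondary() on the target's nodat stack
 *	    -> smp_init_secondary() on the target's kernel stack
 *	      -> set_cpu_online() and cpu_startup_entry()
 *
 * __cpu_up() busy waits until the new cpu has marked itself online.
 */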
/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
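/*
 * Worked example for smp_fill_possible_mask() with made-up sclp values:
 * sclp.mtid = 0, sclp.mtid_cp = 1 and sclp.max_cores = 8 give
 * sclp_max = (1 + 1) * 8 = 16 possible cpus, further clamped by the
 * smt= and possible_cpus= command line parameters and by nr_cpu_ids.
 */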
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
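/*
 * The configure attribute is the user space interface for moving a cpu
 * between the configured and standby states, e.g. (paths assuming the
 * usual sysfs layout; all threads of the core must be offline first):
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 */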
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, false);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
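/*
 * The rescan attribute triggers a redetection of standby cpus, e.g.
 * (path assuming the usual sysfs layout):
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 *
 * Newly found cores typically show up in the standby state and still
 * need to be configured and brought online via the per-cpu attributes
 * above.
 */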
static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);

static __always_inline void set_new_lowcore(struct lowcore *lc)
{
	union register_pair dst, src;
	u32 pfx;

	src.even = (unsigned long) &S390_lowcore;
	src.odd = sizeof(S390_lowcore);
	dst.even = (unsigned long) lc;
	dst.odd = sizeof(*lc);
	pfx = (unsigned long) lc;

	asm volatile(
		"	mvcl	%[dst],%[src]\n"
		"	spx	%[pfx]\n"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: [pfx] "Q" (pfx)
		: "memory", "cc");
}

static int __init smp_reinit_ipl_cpu(void)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc, *lc_ipl;
	unsigned long flags;

	lc_ipl = lowcore_ptr[0];
	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		panic("Couldn't allocate memory");

	local_irq_save(flags);
	local_mcck_disable();
	set_new_lowcore(lc);
	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lowcore_ptr[0] = lc;
	local_mcck_enable();
	local_irq_restore(flags);

	free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
	memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
	memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));

	return 0;
}
early_initcall(smp_reinit_ipl_cpu);