// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
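/*
 * A SIGP order completes with a condition code: 0 - order accepted,
 * 1 - status stored, 2 - busy, 3 - not operational. The helpers below
 * retry the order as long as the target cpu is busy (cc 2).
 */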
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

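/*
 * Each cpu runs on its own lowcore. SIGP set-prefix points the prefix
 * register of the target cpu at the new lowcore, which redirects that
 * cpu's real addresses 0..8191 to it.
 */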
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!async_stack || !mcck_stack)
		goto out_stack;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	if (nmi_alloc_per_cpu(lc))
		goto out_stack;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_stack:
	stack_free(mcck_stack);
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	mcck_stack = pcpu->lowcore->mcck_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	stack_free(mcck_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

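/*
 * Starting a cpu: the address of the function to run and its argument
 * are written to the restart_* members of the target's lowcore, then a
 * SIGP restart is delivered; the restart interrupt handler switches to
 * restart_stack and calls restart_fn with restart_data.
 */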
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void *), void *data)
{
	func(data);	/* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
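/*
 * Example: mtid = 1 (one additional thread per core) yields
 * smp_cpu_mt_shift = 1, so the core with core_id 5 owns the physical
 * cpu addresses 10 and 11 (core_id << smp_cpu_mt_shift plus thread id).
 */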
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

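/*
 * Diagnose 0x9c is a hint to the hypervisor to give the time slice of
 * the current (virtual) cpu to the cpu at the given physical address;
 * the hint may be ignored.
 */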
void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
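/*
 * The sigp loops below wait up to one second: 1000000UL << 12 is one
 * second in TOD clock units, as the TOD clock ticks 4096 times per
 * microsecond (bit 51 increments every microsecond).
 */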
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		__s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * This function sends a 'reschedule' IPI to another CPU. It goes
 * straight through and wastes no time serializing anything; the worst
 * case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
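/*
 * Note that bit is the LSB-based bit position (the register is changed
 * with 1UL << bit), unlike the MSB-first bit numbering used by the
 * Principles of Operation.
 */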
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

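/*
 * Store the register state of a stopped cpu at an absolute address via
 * sigp store-status-at-address; vector and guarded-storage state is
 * stored separately to the machine check extended save area (MCESA)
 * via sigp store-additional-status.
 */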
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_dma_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free_early((unsigned long)info, sizeof(*info));
}

static void smp_init_secondary(void)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcu_cpu_starting(cpu);
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	update_cpu_masks();
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

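/*
 * Example: sclp.max_cores = 64 with mtid = 1 (two hardware threads per
 * core) allows 128 possible cpus; the "possible_cpus=" and "smt="
 * kernel parameters can lower this limit.
 */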
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

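/*
 * Writing 1 or 0 to /sys/devices/system/cpu/cpu<N>/configure configures
 * or deconfigures a core via SCLP. This is refused for cpu 0 and for
 * cores that still have an online thread.
 */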
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, false);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

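/*
 * Writing to /sys/devices/system/cpu/rescan triggers a rescan for new
 * standby cpus via smp_rescan_cpus().
 */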
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);