// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
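/*
 * Overview (descriptive summary; values as defined in asm/sigp.h): a
 * SIGP order completes with one of four condition codes:
 *
 *	SIGP_CC_ORDER_CODE_ACCEPTED (0) - order accepted by the target
 *	SIGP_CC_STATUS_STORED       (1) - status stored, see status bits
 *	SIGP_CC_BUSY                (2) - target busy, retry the order
 *	SIGP_CC_NOT_OPERATIONAL     (3) - target cpu not operational
 *
 * The helpers below differ only in how they handle SIGP_CC_BUSY:
 * __pcpu_sigp_relax() spins with cpu_relax(), while pcpu_sigp_retry()
 * adds a small udelay() after a few attempts.
 */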
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
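/*
 * Note on pcpu_alloc_lowcore() above (descriptive summary): only the
 * first 512 bytes of the boot cpu's lowcore are inherited via memcpy(),
 * the remainder is zeroed and filled in per cpu. The final
 * SIGP_SET_PREFIX order points the target cpu's prefix register at the
 * new lowcore, so all low-address accesses on that cpu are redirected
 * to it from then on.
 */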
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void *), void *data)
{
	func(data);	/* should not return */
}
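/*
 * How pcpu_delegate() below works (descriptive summary): if the target
 * pcpu is the current cpu, the function is simply called on the given
 * stack via CALL_ON_STACK. Otherwise the target is stopped, its
 * restart parameters in the (absolute) lowcore are set up, and a SIGP
 * restart is sent; the sending cpu then stops itself. The new restart
 * PSW on the target invokes func, which must not return.
 */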
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send the cpus an emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;

		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);
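/*
 * Timeout note (descriptive): the TOD clock increments in bit 51 every
 * microsecond, so shifting a microsecond count left by 12 converts it
 * to TOD clock units. (1000000UL << 12) above therefore gives the
 * stopped cpus roughly one second to respond before the loop gives up.
 */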
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
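/*
 * Usage sketch (illustrative values, not a call site from this file):
 *
 *	smp_ctl_set_bit(0, 13);
 *
 * would OR the mask 1UL << 13 into control register 0 on every online
 * cpu via on_each_cpu(), while smp_ctl_clear_bit(0, 13) would AND it
 * out again. Callers pick the control register and bit for the
 * facility they want to enable or disable system wide.
 */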
#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}
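/*
 * The two helpers above split the save work (descriptive summary):
 * smp_save_cpu_regs() fetches the classic register state, via stored
 * status or, for the boot cpu, from the old kernel's lowcore, while
 * smp_save_cpu_vxrs() fetches the vector registers where
 * MACHINE_HAS_VX is set. Both write into the scratch page that
 * smp_save_dump_cpus() below allocates once below 2 GB for the store
 * status sigps.
 */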
void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured * (smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}
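/*
 * Address layout example (illustrative numbers): with smp_cpu_mtid == 1
 * (two threads per core) smp_cpu_mt_shift is 1, so a core with
 * core_id 5 covers the physical cpu addresses 10 and 11; the loop over
 * j above assigns one logical cpu to each of them.
 */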
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

static void smp_init_secondary(void)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
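/*
 * Example (illustrative): booting with "possible_cpus=4" on the kernel
 * command line stores 4 in setup_possible_cpus, which caps the number
 * of possible cpus in smp_fill_possible_mask() further down.
 */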
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}
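/*
 * Note (descriptive): stap() executes the STORE CPU ADDRESS instruction
 * and returns the physical address of the executing cpu, so the boot
 * cpu's pcpu entry above is keyed by hardware address before any other
 * cpu bookkeeping exists.
 */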
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
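/*
 * Sysfs interface sketch (path follows the generic cpu device layout):
 * reading /sys/devices/system/cpu/cpuN/configure returns 0 (standby)
 * or 1 (configured); writing those values triggers the deconfigure or
 * configure paths in cpu_configure_store() below, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 *
 * Online cpus and cpu 0 are rejected with -EBUSY.
 */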
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);