/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu *cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	int polarization;		/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
				    u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
}

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

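/*
 * Attach a task to the cpu that is about to be started: point the
 * lowcore kernel stack, thread info and current task entries at the
 * task and take over its accumulated cpu timer values.
 */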
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

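/*
 * Collect the register save area of a cpu for a later crash dump. The
 * boot cpu's registers are copied from the old memory image, any other
 * cpu is stopped via SIGP so that its status (and vector registers, if
 * the machine has them) can be stored and copied out.
 */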
static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area_ext *sa_ext;
	unsigned long vx_sa;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	sa_ext = dump_save_area_create(cpu);
	if (!sa_ext)
		panic("could not allocate memory for save area\n");
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		if (MACHINE_HAS_VX)
			save_vx_regs_safe(sa_ext->vx_regs);
		return;
	}
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
	if (!MACHINE_HAS_VX)
		return;
	/* Get the VX registers */
	vx_sa = __get_free_page(GFP_KERNEL);
	if (!vx_sa)
		panic("could not allocate memory for VX save area\n");
	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
	free_page(vx_sa);
}

int smp_store_status(int cpu)
{
	unsigned long vx_sa;
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			  vx_sa, NULL);
	return 0;
}

#else /* CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (i >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

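/*
 * Detect the cpus present at boot time: query the cpu information,
 * determine the cpu type of the boot cpu, collect the register save
 * areas of the configured cpus for a possible dump and add all
 * detected cpus to the present mask.
 */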
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

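/*
 * Take the calling cpu out of service: drain pending external-call
 * requests, remove the cpu from the online mask, disable pseudo page
 * faults and mask the external, I/O and most machine-check interrupt
 * sources in the control registers.
 */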
int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp, cpu;

	sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

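/*
 * Register the sysfs representation of a cpu that has become present:
 * create the cpu device, attach the common (and, if the cpu is online,
 * the online) attribute groups and initialize its topology files.
 */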
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	pcpu_devices[cpu].cpu = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);