/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the
 * physical one, which is what causes all the confusion with
 * __cpu_logical_map and cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on the
 *        target CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * This function sends a 'stop' sigp to all other CPUs in the system.
 * It goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[1] = (__u16) phy_cpu;
	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Set up the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

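/*
 * Build cpu_possible_map and cpu_present_map from the number of CPUs
 * detected by smp_count_cpus(). The "additional_cpus" and "possible_cpus"
 * parameters (see the early_param handlers below) allow the possible map
 * to be grown beyond what was detected, capped at NR_CPUS.
 */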
void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

/*
 * Create/remove the per-cpu "capability" sysfs attribute as cpus come
 * online or go away.
 */
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		s = &c->sysdev;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);