/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which is what causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
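
/*
 * Overview sketch (for orientation only, this adds no code): the sender in
 * __smp_call_function_map() below and the receiver in do_call_function()
 * above implement a simple handshake over the cpumasks in call_data_struct:
 *
 *	sending cpu				target cpu (ext. interrupt)
 *	-----------				---------------------------
 *	call_data = &data;
 *	smp_ext_bitcall(cpu, ec_call_function);
 *						cpu_set(me, call_data->started);
 *	spin until data.started == map		func(info);
 *	spin until data.finished == map		cpu_set(me, call_data->finished);
 *	   (only if wait != 0)			   (only if wait != 0)
 */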

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock_bh(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/*
 * smp_call_function_on:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 * @cpu: the CPU where func should run
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
			 int wait, int cpu)
{
	cpumask_t map = CPU_MASK_NONE;

	preempt_disable();
	cpu_set(cpu, map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
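
/*
 * Usage sketch (illustrative only, not code that is built here): a caller
 * that wants a fast, non-blocking helper - the name my_helper below is
 * made up - to run on all other online cpus and waits for completion would
 * do something like
 *
 *	static void my_helper(void *info)
 *	{
 *		do something short and non-blocking here
 *	}
 *
 *	smp_call_function(my_helper, NULL, 0, 1);
 *
 * and to run it on one particular cpu only:
 *
 *	smp_call_function_on(my_helper, NULL, 0, 1, cpu);
 *
 * Both calls must be made with interrupts enabled and neither from a
 * hardware interrupt handler nor from a bottom half, see the WARN_ON()
 * in __smp_call_function_map().
 */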

static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
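
/*
 * Usage sketch (illustrative only; the register/bit numbers are made up):
 * enabling or disabling a facility that is controlled by a control register
 * bit on every cpu comes down to
 *
 *	smp_ctl_set_bit(0, 17);		set bit 17 of CR0 on all cpus
 *	smp_ctl_clear_bit(0, 17);	clear it again on all cpus
 *
 * Internally each call builds an ec_creg_mask_parms with a neutral mask for
 * all 16 control registers (orvals all zeroes, andvals all ones) except for
 * the single register/bit to be changed, and smp_ctl_bit_callback() applies
 * "(creg & andval) | orval" on every cpu via on_each_cpu().
 */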

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[1] = (__u16) phy_cpu;
	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;
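
/*
 * Worked example for smp_setup_cpu_possible_map() below, with made-up
 * numbers: if smp_count_cpus() finds 2 cpus, "additional_cpus=2" was given
 * on the command line and NR_CPUS is 64, then cpus 0-3 become possible and
 * cpus 0-1 become present. If "possible_cpus=8" is given as well, it takes
 * precedence and cpus 0-7 become possible instead (still capped at NR_CPUS).
 */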

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
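
/*
 * Hotplug overview (descriptive sketch only, no extra code): taking a cpu
 * offline involves three of the functions above. __cpu_disable() runs on
 * the dying cpu, removes it from cpu_online_map and shuts off its external,
 * I/O and most machine check interrupt subclasses via the control register
 * masks. The dying cpu eventually ends up in cpu_die(), which stops it with
 * a sigp_stop against itself, while __cpu_die() on another cpu spins in
 * smp_cpu_not_running() until the target has really stopped.
 */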

/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		s = &c->sysdev;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);