/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
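
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): run a
 * fast, non-blocking helper on every other online cpu and wait until all of
 * them have finished. Interrupts must be enabled in the caller, and the
 * calling cpu is not included, so the helper is invoked locally as well.
 *
 *	static void example_drain(void *info)
 *	{
 *		...		(must be fast and must not sleep)
 *	}
 *
 *	smp_call_function(example_drain, my_info, 0, 1);
 *	example_drain(my_info);
 */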

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
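
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): run a
 * helper on cpu 2 and wait until it has completed there. The helper is
 * guaranteed to execute on cpu 2 with preemption disabled; if that cpu is
 * not online the call fails with -EINVAL.
 *
 *	if (smp_call_function_on(example_setup, NULL, 0, 1, 2) != 0)
 *		printk("cpu 2 is not online\n");
 */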

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	do_reipl();
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
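
/*
 * Usage sketch (the register and bit number are only an illustration, not
 * taken from this file): set and later clear a bit in control register 0
 * on every online cpu. The bit number counts from the least significant
 * bit, matching the "1 << bit" shift above.
 *
 *	smp_ctl_set_bit(0, 17);
 *	...
 *	smp_ctl_clear_bit(0, 17);
 */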

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
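
/*
 * Usage sketch (hypothetical caller, not part of this file): reserve any
 * online cpu, run work bound to it, and drop the reservation again. As
 * long as the reservation is held, __cpu_disable() refuses to take the
 * cpu offline.
 *
 *	int cpu;
 *
 *	cpu = smp_get_cpu(CPU_MASK_ALL);
 *	if (cpu >= 0) {
 *		...		(e.g. run work pinned to "cpu")
 *		smp_put_cpu(cpu);
 *	}
 */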

static inline int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
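
/*
 * Both parameters are given on the kernel command line; the values below
 * are only an illustration:
 *
 *	additional_cpus=2	allow two cpus to be hot-added on top of the
 *				cpus detected at boot
 *	possible_cpus=4		fix the number of possible cpus to four,
 *				overriding the additional_cpus calculation
 */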

int __cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);

	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);