/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>

#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)	printk(x)
#else
#define Dprintk(x...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Store all idle threads: they can be reused instead of creating a new
 * thread.  This also avoids complicated thread-destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu
 * info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x)	(sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)])

#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else

#define get_idle_for_cpu(x)	(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif


/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];
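/*
 * go[] is an array of unsigned long, so placing the slave's flag at index
 * SMP_CACHE_BYTES/8 puts it a full cache line away from the master's flag
 * at index 0.  This avoids false sharing while the two CPUs spin on their
 * respective flags during synchronization.
 */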

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* external interrupt vector used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;		/* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;

static int __init
cmdl_force_cpei(char *str)
{
	int value = 0;

	get_option(&str, &value);
	force_cpei_retarget = value;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);

static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	printk("no_int_routing on\n");
	return 1;
}

__setup("nointroute", nointroute);

static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk("Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}

void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}

/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;	/* round-trip time */
	*master = best_tm - best_t0;	/* master's timestamp, relative to t0 */

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
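/*
 * Worked example (hypothetical cycle counts): if the best round saw
 * t0 = 1000 and t1 = 1010 on the slave, with tm = 1003 on the master,
 * then tcenter = 1005 and get_delta() returns 1005 - 1003 = +2, i.e. the
 * slave's itc appears ~2 cycles ahead of the master's.  The
 * (best_t0 % 2 + best_t1 % 2 == 2) test simply rounds the midpoint up
 * when both halvings truncated, so the average is exact without risking
 * overflow of best_t0 + best_t1.
 */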

/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *	       ---\
 *	           --->
 *	                   tm
 *	           /---
 *	       /---
 *	t1 <---
 *
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
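/*
 * Roughly speaking, the per-round correction above combines a proportional
 * term (-delta, the offset just measured) with adjust_latency/4, a damped
 * running sum of past corrections that estimates the latency of applying
 * the adjustment itself; together they let the loop settle on delta == 0
 * instead of oscillating around it.
 */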

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
}

static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

#ifdef CONFIG_NUMA
	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(cpu_to_node_map[cpuid]);
	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
#endif

	ipi_call_lock_irq();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	cpu_set(cpuid, cpu_online_map);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(),
		 * which calls spin_unlock_bh(), which calls local_bh_enable(), which
		 * bugs out if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if the new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}


/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}

struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}
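/*
 * Overview of the AP bring-up handshake implemented by do_boot_cpu() below:
 * the waking CPU points sal_state_for_booting_cpu at the target's rendezvous
 * area (set_brendez_area()) and sends the SAL wakeup vector as an IPI; the
 * AP enters start_ap (head.S), which calls start_secondary() above, and
 * smp_callin() finally sets the AP's bit in cpu_callin_map, which is what
 * the polling loop in do_boot_cpu() waits for.
 */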

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};

	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * We can't use kernel_thread() since we must avoid rescheduling the child.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start (100000 polls of 100us each).
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		cpu_clear(cpu, cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}

/*
 * The "decay=" boot parameter is accepted but its value is parsed and
 * discarded.
 */
static int __init
decay (char *str)
{
	int ticks;

	get_option(&str, &ticks);
	return 1;
}

__setup("decay=", decay);

/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ia64_cpu_to_sapicid[cpu] = -1;
	}

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	cpus_clear(cpu_present_map);
	set_cpu_present(0, true);
	set_cpu_possible(0, true);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
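/*
 * For example (hypothetical SAPIC IDs): with a boot CPU id of 0x04 and
 * smp_boot_data.cpu_phys_id[] = { 0x10, 0x04, 0x08 }, the loop above
 * yields cpu0 -> 0x04, cpu1 -> 0x10 and cpu2 -> 0x08: the boot CPU always
 * becomes logical CPU 0, and the remaining CPUs are numbered in firmware
 * discovery order.
 */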

/*
 * Prepare the boot CPU and the cpu masks for SMP booting.  The APs are
 * woken up later, one at a time, by __cpu_up() sending each a wakeup IPI
 * via do_boot_cpu().
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callin_map);
#ifdef CONFIG_NUMA
	set_numa_node(cpu_to_node_map[smp_processor_id()]);
#endif
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);

	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}

static void
remove_siblinginfo(int cpu)
{
	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	/* remove it from all sibling maps */
	clear_cpu_sibling_map(cpu);
}

extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_desc *desc = NULL;
	const struct cpumask *mask;
	int retval = 0;

	/*
	 * Don't permit the CPEI target to be removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor.
			 */
			new_cpei_cpu = any_online_cpu(cpu_online_map);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			desc = irq_desc + ia64_cpe_irq;
			/*
			 * Switch for now, immediately; we need to handle this as a fake
			 * interrupt like the others, but need to study CPEI behaviour
			 * with polling before making changes.
			 */
			if (desc) {
				desc->chip->disable(ia64_cpe_irq);
				desc->chip->set_affinity(ia64_cpe_irq, mask);
				desc->chip->enable(ia64_cpe_irq);
				printk("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		if (!desc) {
			printk("Unable to retarget CPEI, offlining cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
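/*
 * Offline handshake: __cpu_disable() below runs on the CPU being removed
 * and clears it from cpu_online_map (backing out if platform interrupts
 * such as the CPEI cannot be migrated away first); the dying CPU then
 * reaches play_dead(), which acknowledges by setting per_cpu(cpu_state)
 * to CPU_DEAD, and __cpu_die() polls for that acknowledgment from a
 * surviving CPU.
 */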

/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Don't permit removal of the boot processor unless the platform
	 * explicitly supports it.
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk("Your platform does not support removal of BSP\n");
		return -EBUSY;
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	cpu_clear(cpu, cpu_online_map);

	if (migrate_platform_irqs(cpu)) {
		cpu_set(cpu, cpu_online_map);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 100; i++) {
		/* They ack this in play_dead() by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */

void
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */

	for_each_online_cpu(cpu) {
		bogosum += cpu_data(cpu)->loops_per_jiffy;
	}

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}

static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int i;

	for_each_online_cpu(i) {
		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
			cpu_set(i, cpu_core_map[cpu]);
			cpu_set(cpu, cpu_core_map[i]);
			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
			}
		}
	}
}
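/*
 * To illustrate the two maps built above (hypothetical topology): on a
 * socket with two cores of two threads each, a thread's cpu_core_map
 * holds all four logical CPUs of its socket, while its cpu_sibling_map
 * holds only the two threads that share its core.
 */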

int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * An already-booted CPU?  Not valid anymore, since we no longer
	 * tightspin in the idle loop waiting to be woken.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}

/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp),
				       0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}

/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	long status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		info.overview_ppid = 0;
		info.overview_cpp = 1;
		info.overview_tpc = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}

/*
 * Returns non-zero if multi-threading is enabled on at least one physical
 * package.  Due to CPU hotplug and "maxcpus=", not all threads are
 * necessarily enabled even when the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int i, j;

	for_each_present_cpu(i) {
		for_each_present_cpu(j) {
			if (j == i)
				continue;
			if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
				if (cpu_data(j)->core_id == cpu_data(i)->core_id)
					return 1;
			}
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);