1 /* 2 * SMP boot-related support 3 * 4 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co 5 * David Mosberger-Tang <davidm@hpl.hp.com> 6 * Copyright (C) 2001, 2004-2005 Intel Corp 7 * Rohit Seth <rohit.seth@intel.com> 8 * Suresh Siddha <suresh.b.siddha@intel.com> 9 * Gordon Jin <gordon.jin@intel.com> 10 * Ashok Raj <ashok.raj@intel.com> 11 * 12 * 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here. 13 * 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code. 14 * 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence. 15 * smp_boot_cpus()/smp_commence() is replaced by 16 * smp_prepare_cpus()/__cpu_up()/smp_cpus_done(). 17 * 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support 18 * 04/12/26 Jin Gordon <gordon.jin@intel.com> 19 * 04/12/26 Rohit Seth <rohit.seth@intel.com> 20 * Add multi-threading and multi-core detection 21 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com> 22 * Setup cpu_sibling_map and cpu_core_map 23 */ 24 25 #include <linux/module.h> 26 #include <linux/acpi.h> 27 #include <linux/bootmem.h> 28 #include <linux/cpu.h> 29 #include <linux/delay.h> 30 #include <linux/init.h> 31 #include <linux/interrupt.h> 32 #include <linux/irq.h> 33 #include <linux/kernel.h> 34 #include <linux/kernel_stat.h> 35 #include <linux/mm.h> 36 #include <linux/notifier.h> 37 #include <linux/smp.h> 38 #include <linux/spinlock.h> 39 #include <linux/efi.h> 40 #include <linux/percpu.h> 41 #include <linux/bitops.h> 42 43 #include <asm/atomic.h> 44 #include <asm/cache.h> 45 #include <asm/current.h> 46 #include <asm/delay.h> 47 #include <asm/ia32.h> 48 #include <asm/io.h> 49 #include <asm/irq.h> 50 #include <asm/machvec.h> 51 #include <asm/mca.h> 52 #include <asm/page.h> 53 #include <asm/paravirt.h> 54 #include <asm/pgalloc.h> 55 #include <asm/pgtable.h> 56 #include <asm/processor.h> 57 #include <asm/ptrace.h> 58 #include <asm/sal.h> 59 #include <asm/system.h> 60 #include <asm/tlbflush.h> 61 #include <asm/unistd.h> 62 #include <asm/sn/arch.h> 63 64 #define SMP_DEBUG 0 65 66 #if SMP_DEBUG 67 #define Dprintk(x...) printk(x) 68 #else 69 #define Dprintk(x...) 70 #endif 71 72 #ifdef CONFIG_HOTPLUG_CPU 73 #ifdef CONFIG_PERMIT_BSP_REMOVE 74 #define bsp_remove_ok 1 75 #else 76 #define bsp_remove_ok 0 77 #endif 78 79 /* 80 * Store all idle threads, this can be reused instead of creating 81 * a new thread. Also avoids complicated thread destroy functionality 82 * for idle threads. 83 */ 84 struct task_struct *idle_thread_array[NR_CPUS]; 85 86 /* 87 * Global array allocated for NR_CPUS at boot time 88 */ 89 struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS]; 90 91 /* 92 * start_ap in head.S uses this to store current booting cpu 93 * info. 94 */ 95 struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0]; 96 97 #define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]); 98 99 #define get_idle_for_cpu(x) (idle_thread_array[(x)]) 100 #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p)) 101 102 #else 103 104 #define get_idle_for_cpu(x) (NULL) 105 #define set_idle_for_cpu(x,p) 106 #define set_brendez_area(x) 107 #endif 108 109 110 /* 111 * ITC synchronization related stuff: 112 */ 113 #define MASTER (0) 114 #define SLAVE (SMP_CACHE_BYTES/8) 115 116 #define NUM_ROUNDS 64 /* magic value */ 117 #define NUM_ITERS 5 /* likewise */ 118 119 static DEFINE_SPINLOCK(itc_sync_lock); 120 static volatile unsigned long go[SLAVE + 1]; 121 122 #define DEBUG_ITC_SYNC 0 123 124 extern void start_ap (void); 125 extern unsigned long ia64_iobase; 126 127 struct task_struct *task_for_booting_cpu; 128 129 /* 130 * State for each CPU 131 */ 132 DEFINE_PER_CPU(int, cpu_state); 133 134 /* Bitmasks of currently online, and possible CPUs */ 135 cpumask_t cpu_online_map; 136 EXPORT_SYMBOL(cpu_online_map); 137 cpumask_t cpu_possible_map = CPU_MASK_NONE; 138 EXPORT_SYMBOL(cpu_possible_map); 139 140 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 141 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); 142 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); 143 144 int smp_num_siblings = 1; 145 146 /* which logical CPU number maps to which CPU (physical APIC ID) */ 147 volatile int ia64_cpu_to_sapicid[NR_CPUS]; 148 EXPORT_SYMBOL(ia64_cpu_to_sapicid); 149 150 static volatile cpumask_t cpu_callin_map; 151 152 struct smp_boot_data smp_boot_data __initdata; 153 154 unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */ 155 156 char __initdata no_int_routing; 157 158 unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */ 159 160 #ifdef CONFIG_FORCE_CPEI_RETARGET 161 #define CPEI_OVERRIDE_DEFAULT (1) 162 #else 163 #define CPEI_OVERRIDE_DEFAULT (0) 164 #endif 165 166 unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT; 167 168 static int __init 169 cmdl_force_cpei(char *str) 170 { 171 int value=0; 172 173 get_option (&str, &value); 174 force_cpei_retarget = value; 175 176 return 1; 177 } 178 179 __setup("force_cpei=", cmdl_force_cpei); 180 181 static int __init 182 nointroute (char *str) 183 { 184 no_int_routing = 1; 185 printk ("no_int_routing on\n"); 186 return 1; 187 } 188 189 __setup("nointroute", nointroute); 190 191 static void fix_b0_for_bsp(void) 192 { 193 #ifdef CONFIG_HOTPLUG_CPU 194 int cpuid; 195 static int fix_bsp_b0 = 1; 196 197 cpuid = smp_processor_id(); 198 199 /* 200 * Cache the b0 value on the first AP that comes up 201 */ 202 if (!(fix_bsp_b0 && cpuid)) 203 return; 204 205 sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0]; 206 printk ("Fixed BSP b0 value from CPU %d\n", cpuid); 207 208 fix_bsp_b0 = 0; 209 #endif 210 } 211 212 void 213 sync_master (void *arg) 214 { 215 unsigned long flags, i; 216 217 go[MASTER] = 0; 218 219 local_irq_save(flags); 220 { 221 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) { 222 while (!go[MASTER]) 223 cpu_relax(); 224 go[MASTER] = 0; 225 go[SLAVE] = ia64_get_itc(); 226 } 227 } 228 local_irq_restore(flags); 229 } 230 231 /* 232 * Return the number of cycles by which our itc differs from the itc on the master 233 * (time-keeper) CPU. A positive number indicates our itc is ahead of the master, 234 * negative that it is behind. 235 */ 236 static inline long 237 get_delta (long *rt, long *master) 238 { 239 unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0; 240 unsigned long tcenter, t0, t1, tm; 241 long i; 242 243 for (i = 0; i < NUM_ITERS; ++i) { 244 t0 = ia64_get_itc(); 245 go[MASTER] = 1; 246 while (!(tm = go[SLAVE])) 247 cpu_relax(); 248 go[SLAVE] = 0; 249 t1 = ia64_get_itc(); 250 251 if (t1 - t0 < best_t1 - best_t0) 252 best_t0 = t0, best_t1 = t1, best_tm = tm; 253 } 254 255 *rt = best_t1 - best_t0; 256 *master = best_tm - best_t0; 257 258 /* average best_t0 and best_t1 without overflow: */ 259 tcenter = (best_t0/2 + best_t1/2); 260 if (best_t0 % 2 + best_t1 % 2 == 2) 261 ++tcenter; 262 return tcenter - best_tm; 263 } 264 265 /* 266 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU 267 * (normally the time-keeper CPU). We use a closed loop to eliminate the possibility of 268 * unaccounted-for errors (such as getting a machine check in the middle of a calibration 269 * step). The basic idea is for the slave to ask the master what itc value it has and to 270 * read its own itc before and after the master responds. Each iteration gives us three 271 * timestamps: 272 * 273 * slave master 274 * 275 * t0 ---\ 276 * ---\ 277 * ---> 278 * tm 279 * /--- 280 * /--- 281 * t1 <--- 282 * 283 * 284 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0 285 * and t1. If we achieve this, the clocks are synchronized provided the interconnect 286 * between the slave and the master is symmetric. Even if the interconnect were 287 * asymmetric, we would still know that the synchronization error is smaller than the 288 * roundtrip latency (t0 - t1). 289 * 290 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to 291 * within one or two cycles. However, we can only *guarantee* that the synchronization is 292 * accurate to within a round-trip time, which is typically in the range of several 293 * hundred cycles (e.g., ~500 cycles). In practice, this means that the itc's are usually 294 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better 295 * than half a micro second or so. 296 */ 297 void 298 ia64_sync_itc (unsigned int master) 299 { 300 long i, delta, adj, adjust_latency = 0, done = 0; 301 unsigned long flags, rt, master_time_stamp, bound; 302 #if DEBUG_ITC_SYNC 303 struct { 304 long rt; /* roundtrip time */ 305 long master; /* master's timestamp */ 306 long diff; /* difference between midpoint and master's timestamp */ 307 long lat; /* estimate of itc adjustment latency */ 308 } t[NUM_ROUNDS]; 309 #endif 310 311 /* 312 * Make sure local timer ticks are disabled while we sync. If 313 * they were enabled, we'd have to worry about nasty issues 314 * like setting the ITC ahead of (or a long time before) the 315 * next scheduled tick. 316 */ 317 BUG_ON((ia64_get_itv() & (1 << 16)) == 0); 318 319 go[MASTER] = 1; 320 321 if (smp_call_function_single(master, sync_master, NULL, 0) < 0) { 322 printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master); 323 return; 324 } 325 326 while (go[MASTER]) 327 cpu_relax(); /* wait for master to be ready */ 328 329 spin_lock_irqsave(&itc_sync_lock, flags); 330 { 331 for (i = 0; i < NUM_ROUNDS; ++i) { 332 delta = get_delta(&rt, &master_time_stamp); 333 if (delta == 0) { 334 done = 1; /* let's lock on to this... */ 335 bound = rt; 336 } 337 338 if (!done) { 339 if (i > 0) { 340 adjust_latency += -delta; 341 adj = -delta + adjust_latency/4; 342 } else 343 adj = -delta; 344 345 ia64_set_itc(ia64_get_itc() + adj); 346 } 347 #if DEBUG_ITC_SYNC 348 t[i].rt = rt; 349 t[i].master = master_time_stamp; 350 t[i].diff = delta; 351 t[i].lat = adjust_latency/4; 352 #endif 353 } 354 } 355 spin_unlock_irqrestore(&itc_sync_lock, flags); 356 357 #if DEBUG_ITC_SYNC 358 for (i = 0; i < NUM_ROUNDS; ++i) 359 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n", 360 t[i].rt, t[i].master, t[i].diff, t[i].lat); 361 #endif 362 363 printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, " 364 "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt); 365 } 366 367 /* 368 * Ideally sets up per-cpu profiling hooks. Doesn't do much now... 369 */ 370 static inline void __devinit 371 smp_setup_percpu_timer (void) 372 { 373 } 374 375 static void __cpuinit 376 smp_callin (void) 377 { 378 int cpuid, phys_id, itc_master; 379 struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo; 380 extern void ia64_init_itm(void); 381 extern volatile int time_keeper_id; 382 383 #ifdef CONFIG_PERFMON 384 extern void pfm_init_percpu(void); 385 #endif 386 387 cpuid = smp_processor_id(); 388 phys_id = hard_smp_processor_id(); 389 itc_master = time_keeper_id; 390 391 if (cpu_online(cpuid)) { 392 printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n", 393 phys_id, cpuid); 394 BUG(); 395 } 396 397 fix_b0_for_bsp(); 398 399 ipi_call_lock_irq(); 400 spin_lock(&vector_lock); 401 /* Setup the per cpu irq handling data structures */ 402 __setup_vector_irq(cpuid); 403 cpu_set(cpuid, cpu_online_map); 404 per_cpu(cpu_state, cpuid) = CPU_ONLINE; 405 spin_unlock(&vector_lock); 406 ipi_call_unlock_irq(); 407 408 smp_setup_percpu_timer(); 409 410 ia64_mca_cmc_vector_setup(); /* Setup vector on AP */ 411 412 #ifdef CONFIG_PERFMON 413 pfm_init_percpu(); 414 #endif 415 416 local_irq_enable(); 417 418 if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { 419 /* 420 * Synchronize the ITC with the BP. Need to do this after irqs are 421 * enabled because ia64_sync_itc() calls smp_call_function_single(), which 422 * calls spin_unlock_bh(), which calls spin_unlock_bh(), which calls 423 * local_bh_enable(), which bugs out if irqs are not enabled... 424 */ 425 Dprintk("Going to syncup ITC with ITC Master.\n"); 426 ia64_sync_itc(itc_master); 427 } 428 429 /* 430 * Get our bogomips. 431 */ 432 ia64_init_itm(); 433 434 /* 435 * Delay calibration can be skipped if new processor is identical to the 436 * previous processor. 437 */ 438 last_cpuinfo = cpu_data(cpuid - 1); 439 this_cpuinfo = local_cpu_data; 440 if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq || 441 last_cpuinfo->proc_freq != this_cpuinfo->proc_freq || 442 last_cpuinfo->features != this_cpuinfo->features || 443 last_cpuinfo->revision != this_cpuinfo->revision || 444 last_cpuinfo->family != this_cpuinfo->family || 445 last_cpuinfo->archrev != this_cpuinfo->archrev || 446 last_cpuinfo->model != this_cpuinfo->model) 447 calibrate_delay(); 448 local_cpu_data->loops_per_jiffy = loops_per_jiffy; 449 450 #ifdef CONFIG_IA32_SUPPORT 451 ia32_gdt_init(); 452 #endif 453 454 /* 455 * Allow the master to continue. 456 */ 457 cpu_set(cpuid, cpu_callin_map); 458 Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); 459 } 460 461 462 /* 463 * Activate a secondary processor. head.S calls this. 464 */ 465 int __cpuinit 466 start_secondary (void *unused) 467 { 468 /* Early console may use I/O ports */ 469 ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase)); 470 Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id()); 471 efi_map_pal_code(); 472 cpu_init(); 473 preempt_disable(); 474 smp_callin(); 475 476 cpu_idle(); 477 return 0; 478 } 479 480 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs) 481 { 482 return NULL; 483 } 484 485 struct create_idle { 486 struct work_struct work; 487 struct task_struct *idle; 488 struct completion done; 489 int cpu; 490 }; 491 492 void __cpuinit 493 do_fork_idle(struct work_struct *work) 494 { 495 struct create_idle *c_idle = 496 container_of(work, struct create_idle, work); 497 498 c_idle->idle = fork_idle(c_idle->cpu); 499 complete(&c_idle->done); 500 } 501 502 static int __cpuinit 503 do_boot_cpu (int sapicid, int cpu) 504 { 505 int timeout; 506 struct create_idle c_idle = { 507 .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), 508 .cpu = cpu, 509 .done = COMPLETION_INITIALIZER(c_idle.done), 510 }; 511 512 c_idle.idle = get_idle_for_cpu(cpu); 513 if (c_idle.idle) { 514 init_idle(c_idle.idle, cpu); 515 goto do_rest; 516 } 517 518 /* 519 * We can't use kernel_thread since we must avoid to reschedule the child. 520 */ 521 if (!keventd_up() || current_is_keventd()) 522 c_idle.work.func(&c_idle.work); 523 else { 524 schedule_work(&c_idle.work); 525 wait_for_completion(&c_idle.done); 526 } 527 528 if (IS_ERR(c_idle.idle)) 529 panic("failed fork for CPU %d", cpu); 530 531 set_idle_for_cpu(cpu, c_idle.idle); 532 533 do_rest: 534 task_for_booting_cpu = c_idle.idle; 535 536 Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid); 537 538 set_brendez_area(cpu); 539 platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0); 540 541 /* 542 * Wait 10s total for the AP to start 543 */ 544 Dprintk("Waiting on callin_map ..."); 545 for (timeout = 0; timeout < 100000; timeout++) { 546 if (cpu_isset(cpu, cpu_callin_map)) 547 break; /* It has booted */ 548 udelay(100); 549 } 550 Dprintk("\n"); 551 552 if (!cpu_isset(cpu, cpu_callin_map)) { 553 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid); 554 ia64_cpu_to_sapicid[cpu] = -1; 555 cpu_clear(cpu, cpu_online_map); /* was set in smp_callin() */ 556 return -EINVAL; 557 } 558 return 0; 559 } 560 561 static int __init 562 decay (char *str) 563 { 564 int ticks; 565 get_option (&str, &ticks); 566 return 1; 567 } 568 569 __setup("decay=", decay); 570 571 /* 572 * Initialize the logical CPU number to SAPICID mapping 573 */ 574 void __init 575 smp_build_cpu_map (void) 576 { 577 int sapicid, cpu, i; 578 int boot_cpu_id = hard_smp_processor_id(); 579 580 for (cpu = 0; cpu < NR_CPUS; cpu++) { 581 ia64_cpu_to_sapicid[cpu] = -1; 582 } 583 584 ia64_cpu_to_sapicid[0] = boot_cpu_id; 585 cpus_clear(cpu_present_map); 586 cpu_set(0, cpu_present_map); 587 cpu_set(0, cpu_possible_map); 588 for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { 589 sapicid = smp_boot_data.cpu_phys_id[i]; 590 if (sapicid == boot_cpu_id) 591 continue; 592 cpu_set(cpu, cpu_present_map); 593 cpu_set(cpu, cpu_possible_map); 594 ia64_cpu_to_sapicid[cpu] = sapicid; 595 cpu++; 596 } 597 } 598 599 /* 600 * Cycle through the APs sending Wakeup IPIs to boot each. 601 */ 602 void __init 603 smp_prepare_cpus (unsigned int max_cpus) 604 { 605 int boot_cpu_id = hard_smp_processor_id(); 606 607 /* 608 * Initialize the per-CPU profiling counter/multiplier 609 */ 610 611 smp_setup_percpu_timer(); 612 613 /* 614 * We have the boot CPU online for sure. 615 */ 616 cpu_set(0, cpu_online_map); 617 cpu_set(0, cpu_callin_map); 618 619 local_cpu_data->loops_per_jiffy = loops_per_jiffy; 620 ia64_cpu_to_sapicid[0] = boot_cpu_id; 621 622 printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id); 623 624 current_thread_info()->cpu = 0; 625 626 /* 627 * If SMP should be disabled, then really disable it! 628 */ 629 if (!max_cpus) { 630 printk(KERN_INFO "SMP mode deactivated.\n"); 631 cpus_clear(cpu_online_map); 632 cpus_clear(cpu_present_map); 633 cpus_clear(cpu_possible_map); 634 cpu_set(0, cpu_online_map); 635 cpu_set(0, cpu_present_map); 636 cpu_set(0, cpu_possible_map); 637 return; 638 } 639 } 640 641 void __devinit smp_prepare_boot_cpu(void) 642 { 643 cpu_set(smp_processor_id(), cpu_online_map); 644 cpu_set(smp_processor_id(), cpu_callin_map); 645 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 646 paravirt_post_smp_prepare_boot_cpu(); 647 } 648 649 #ifdef CONFIG_HOTPLUG_CPU 650 static inline void 651 clear_cpu_sibling_map(int cpu) 652 { 653 int i; 654 655 for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) 656 cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); 657 for_each_cpu_mask(i, cpu_core_map[cpu]) 658 cpu_clear(cpu, cpu_core_map[i]); 659 660 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; 661 } 662 663 static void 664 remove_siblinginfo(int cpu) 665 { 666 int last = 0; 667 668 if (cpu_data(cpu)->threads_per_core == 1 && 669 cpu_data(cpu)->cores_per_socket == 1) { 670 cpu_clear(cpu, cpu_core_map[cpu]); 671 cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu)); 672 return; 673 } 674 675 last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0); 676 677 /* remove it from all sibling map's */ 678 clear_cpu_sibling_map(cpu); 679 } 680 681 extern void fixup_irqs(void); 682 683 int migrate_platform_irqs(unsigned int cpu) 684 { 685 int new_cpei_cpu; 686 irq_desc_t *desc = NULL; 687 cpumask_t mask; 688 int retval = 0; 689 690 /* 691 * dont permit CPEI target to removed. 692 */ 693 if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) { 694 printk ("CPU (%d) is CPEI Target\n", cpu); 695 if (can_cpei_retarget()) { 696 /* 697 * Now re-target the CPEI to a different processor 698 */ 699 new_cpei_cpu = any_online_cpu(cpu_online_map); 700 mask = cpumask_of_cpu(new_cpei_cpu); 701 set_cpei_target_cpu(new_cpei_cpu); 702 desc = irq_desc + ia64_cpe_irq; 703 /* 704 * Switch for now, immediately, we need to do fake intr 705 * as other interrupts, but need to study CPEI behaviour with 706 * polling before making changes. 707 */ 708 if (desc) { 709 desc->chip->disable(ia64_cpe_irq); 710 desc->chip->set_affinity(ia64_cpe_irq, mask); 711 desc->chip->enable(ia64_cpe_irq); 712 printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu); 713 } 714 } 715 if (!desc) { 716 printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu); 717 retval = -EBUSY; 718 } 719 } 720 return retval; 721 } 722 723 /* must be called with cpucontrol mutex held */ 724 int __cpu_disable(void) 725 { 726 int cpu = smp_processor_id(); 727 728 /* 729 * dont permit boot processor for now 730 */ 731 if (cpu == 0 && !bsp_remove_ok) { 732 printk ("Your platform does not support removal of BSP\n"); 733 return (-EBUSY); 734 } 735 736 if (ia64_platform_is("sn2")) { 737 if (!sn_cpu_disable_allowed(cpu)) 738 return -EBUSY; 739 } 740 741 cpu_clear(cpu, cpu_online_map); 742 743 if (migrate_platform_irqs(cpu)) { 744 cpu_set(cpu, cpu_online_map); 745 return (-EBUSY); 746 } 747 748 remove_siblinginfo(cpu); 749 cpu_clear(cpu, cpu_online_map); 750 fixup_irqs(); 751 local_flush_tlb_all(); 752 cpu_clear(cpu, cpu_callin_map); 753 return 0; 754 } 755 756 void __cpu_die(unsigned int cpu) 757 { 758 unsigned int i; 759 760 for (i = 0; i < 100; i++) { 761 /* They ack this in play_dead by setting CPU_DEAD */ 762 if (per_cpu(cpu_state, cpu) == CPU_DEAD) 763 { 764 printk ("CPU %d is now offline\n", cpu); 765 return; 766 } 767 msleep(100); 768 } 769 printk(KERN_ERR "CPU %u didn't die...\n", cpu); 770 } 771 #endif /* CONFIG_HOTPLUG_CPU */ 772 773 void 774 smp_cpus_done (unsigned int dummy) 775 { 776 int cpu; 777 unsigned long bogosum = 0; 778 779 /* 780 * Allow the user to impress friends. 781 */ 782 783 for_each_online_cpu(cpu) { 784 bogosum += cpu_data(cpu)->loops_per_jiffy; 785 } 786 787 printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 788 (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100); 789 } 790 791 static inline void __devinit 792 set_cpu_sibling_map(int cpu) 793 { 794 int i; 795 796 for_each_online_cpu(i) { 797 if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) { 798 cpu_set(i, cpu_core_map[cpu]); 799 cpu_set(cpu, cpu_core_map[i]); 800 if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) { 801 cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 802 cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 803 } 804 } 805 } 806 } 807 808 int __cpuinit 809 __cpu_up (unsigned int cpu) 810 { 811 int ret; 812 int sapicid; 813 814 sapicid = ia64_cpu_to_sapicid[cpu]; 815 if (sapicid == -1) 816 return -EINVAL; 817 818 /* 819 * Already booted cpu? not valid anymore since we dont 820 * do idle loop tightspin anymore. 821 */ 822 if (cpu_isset(cpu, cpu_callin_map)) 823 return -EINVAL; 824 825 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; 826 /* Processor goes to start_secondary(), sets online flag */ 827 ret = do_boot_cpu(sapicid, cpu); 828 if (ret < 0) 829 return ret; 830 831 if (cpu_data(cpu)->threads_per_core == 1 && 832 cpu_data(cpu)->cores_per_socket == 1) { 833 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 834 cpu_set(cpu, cpu_core_map[cpu]); 835 return 0; 836 } 837 838 set_cpu_sibling_map(cpu); 839 840 return 0; 841 } 842 843 /* 844 * Assume that CPUs have been discovered by some platform-dependent interface. For 845 * SoftSDV/Lion, that would be ACPI. 846 * 847 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP(). 848 */ 849 void __init 850 init_smp_config(void) 851 { 852 struct fptr { 853 unsigned long fp; 854 unsigned long gp; 855 } *ap_startup; 856 long sal_ret; 857 858 /* Tell SAL where to drop the APs. */ 859 ap_startup = (struct fptr *) start_ap; 860 sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ, 861 ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0); 862 if (sal_ret < 0) 863 printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n", 864 ia64_sal_strerror(sal_ret)); 865 } 866 867 /* 868 * identify_siblings(cpu) gets called from identify_cpu. This populates the 869 * information related to logical execution units in per_cpu_data structure. 870 */ 871 void __devinit 872 identify_siblings(struct cpuinfo_ia64 *c) 873 { 874 s64 status; 875 u16 pltid; 876 pal_logical_to_physical_t info; 877 878 status = ia64_pal_logical_to_phys(-1, &info); 879 if (status != PAL_STATUS_SUCCESS) { 880 if (status != PAL_STATUS_UNIMPLEMENTED) { 881 printk(KERN_ERR 882 "ia64_pal_logical_to_phys failed with %ld\n", 883 status); 884 return; 885 } 886 887 info.overview_ppid = 0; 888 info.overview_cpp = 1; 889 info.overview_tpc = 1; 890 } 891 892 status = ia64_sal_physical_id_info(&pltid); 893 if (status != PAL_STATUS_SUCCESS) { 894 if (status != PAL_STATUS_UNIMPLEMENTED) 895 printk(KERN_ERR 896 "ia64_sal_pltid failed with %ld\n", 897 status); 898 return; 899 } 900 901 c->socket_id = (pltid << 8) | info.overview_ppid; 902 903 if (info.overview_cpp == 1 && info.overview_tpc == 1) 904 return; 905 906 c->cores_per_socket = info.overview_cpp; 907 c->threads_per_core = info.overview_tpc; 908 c->num_log = info.overview_num_log; 909 910 c->core_id = info.log1_cid; 911 c->thread_id = info.log1_tid; 912 } 913 914 /* 915 * returns non zero, if multi-threading is enabled 916 * on at least one physical package. Due to hotplug cpu 917 * and (maxcpus=), all threads may not necessarily be enabled 918 * even though the processor supports multi-threading. 919 */ 920 int is_multithreading_enabled(void) 921 { 922 int i, j; 923 924 for_each_present_cpu(i) { 925 for_each_present_cpu(j) { 926 if (j == i) 927 continue; 928 if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) { 929 if (cpu_data(j)->core_id == cpu_data(i)->core_id) 930 return 1; 931 } 932 } 933 } 934 return 0; 935 } 936 EXPORT_SYMBOL_GPL(is_multithreading_enabled); 937