/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler:	Changes for 2.1 kernel map.
 *	Michel Lespinasse:	Changes for 2.1 kernel map.
 *	Michael Chastain:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj	:	CPU hotplug support
 *	Glauber Costa	:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
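/*
 * For illustration: consumers normally reach these masks through the
 * topology accessors rather than the raw per-cpu variables, e.g.
 *
 *	int sibling;
 *
 *	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
 *		do_something(sibling);
 *
 * (do_something() stands in for any per-sibling work; it is not a
 * real kernel function.)
 */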
/* Logical package management. We might want to allocate that dynamically. */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
	int retval = x86_topology_update;

	x86_topology_update = false;
	return retval;
}

static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}
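/*
 * Worked example for the warm-reset path above (illustrative numbers):
 * with a trampoline at start_eip == 0x9a000, segment 0x9a00 is stored
 * at TRAMPOLINE_PHYS_HIGH and offset 0x0 at TRAMPOLINE_PHYS_LOW, and
 * CMOS shutdown-status byte 0xf == 0xa makes the BIOS skip the memory
 * test and jump through that 40:67 vector, i.e. to 9a00:0000 ==
 * 0x9a000, after the INIT-triggered warm reset.
 */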
/*
 * Report back to the Boot Processor during boot time or to the caller
 * processor during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If woken up by an INIT in an 82489DX configuration,
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * The topology information must be up to date before
	 * calibrate_delay() and notify_cpu_starting().
	 */
	set_cpu_sibling_map(raw_smp_processor_id());

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. The previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * before cpu_init(); SMP booting is so fragile that we want to
	 * limit the work done here to the bare minimum.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		__write_cr4(__read_cr4() | X86_CR4_PCIDE);

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();
	__flush_tlb_all();
#endif
	load_current_idt();
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
	return smp_num_siblings > 1;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->phys_proc_id == phys_pkg)
			return c->logical_proc_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg:	The physical package id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_pkg(pkg);
	if (new >= 0)
		goto found;

	new = logical_packages++;
	if (new != pkg) {
		pr_info("CPU %u Converting physical %u to logical package %u\n",
			cpu, pkg, new);
	}
found:
	cpu_data(cpu).logical_proc_id = new;
	return 0;
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	topology_update_package_map(c->phys_proc_id, id);
	c->initialized = true;
}
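/*
 * Example for the package map above (hypothetical topology): on a
 * two-socket board whose BIOS enumerates physical package ids 0 and 3,
 * the first CPU of package 3 to boot gets logical package id 1 from
 * logical_packages++, and "Converting physical 3 to logical package 1"
 * is logged. Logical ids thus stay dense even when physical ids are
 * sparse.
 */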
/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	/* Copy boot_cpu_data only on the first bringup */
	if (!c->initialized)
		*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up an AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
	c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
			if (c->cpu_core_id == o->cpu_core_id)
				return topology_sane(c, o, "smt");

			if ((c->cu_id != 0xff) &&
			    (o->cu_id != 0xff) &&
			    (c->cu_id == o->cu_id))
				return topology_sane(c, o, "smt");
		}

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

/*
 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
 *
 * These are Intel CPUs that enumerate an LLC that is shared by
 * multiple NUMA nodes. The LLC on these systems is shared for
 * off-package data access but private to the NUMA node (half
 * of the package) for on-package access.
 *
 * CPUID (the source of the information about the LLC) can only
 * enumerate the cache as being shared *or* unshared, but not
 * this particular configuration. The CPU in this case enumerates
 * the cache to be shared across the entire package (spanning both
 * NUMA nodes).
 */

static const struct x86_cpu_id snc_cpu[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
	{}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	/* Do not match if we do not have a valid APICID for cpu: */
	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
		return false;

	/* Do not match if LLC id does not match: */
	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
		return false;

	/*
	 * Allow the SNC topology without warning. Return of false
	 * means 'c' does not share the LLC of 'o'. This will be
	 * reflected to userspace.
	 */
	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
		return false;

	return topology_sane(c, o, "llc");
}
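/*
 * Concrete SNC case for match_llc() above (illustrative): on a
 * Skylake-X system with Sub-NUMA Clustering enabled, two CPUs of the
 * same package may sit in different NUMA nodes while CPUID reports a
 * single package-wide LLC, so cpu_llc_id matches but
 * topology_same_node() is false. The snc_cpu check then returns false
 * silently instead of tripping the topology_sane() warning, and
 * userspace sees per-node LLC sched domains.
 */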
/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node. If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
	return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
	return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;
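/*
 * For example, on a two-socket machine with two threads per core,
 * x86_topology above yields SMT -> MC -> DIE sched domains, with NUMA
 * levels stacked on top by the generic code. When NUMA nodes live
 * inside a package (x86_has_numa_in_package), a DIE domain would span
 * nodes, so native_smp_cpus_done() switches to
 * x86_numa_in_package_topology, which simply omits the DIE level.
 */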
void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);
	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_die(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}
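/*
 * The BogoMIPS arithmetic in impress_friends(): loops_per_jiffy * HZ
 * is loops per second and one BogoMIPS is 500000 loops per second, so
 * bogosum / (500000 / HZ) is the integer part and
 * (bogosum / (5000 / HZ)) % 100 the two decimals. E.g. with HZ == 250
 * and bogosum == 19600000 this prints "9800.00 BogoMIPS"
 * (illustrative numbers).
 */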
void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to over-ride this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
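/*
 * Schematic of the INIT-SIPI-SIPI sequence implemented by
 * wakeup_secondary_cpu_via_init() below (simplified; the real timings
 * and error checks are in the code):
 *
 *	INIT assert    -> wait for ICR idle -> udelay(init_udelay)
 *	INIT deassert  -> wait for ICR idle
 *	STARTUP (x2)   -> vector = start_eip >> 12
 *
 * The STARTUP IPI carries only an 8-bit page number, so the AP begins
 * real-mode execution at vector << 12; e.g. a trampoline at physical
 * 0x9a000 is sent as vector 0x9a.
 */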
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state < SYSTEM_RUNNING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs: ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}
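/*
 * Illustrative output of the compact branch above for a 2-node,
 * 8-CPU machine (exact padding depends on the possible-CPU counts):
 *
 *	x86: Booting SMP configuration:
 *	.... node  #0, CPUs:    #1 #2 #3
 *	.... node  #1, CPUs: #4 #5 #6 #7
 */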
static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
			int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by NMI.
	 *
	 * Register an NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

void common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
	initial_gs = per_cpu_offset(cpu);
#endif
}
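/*
 * Sketch of the BSP/AP bring-up handshake driven by do_boot_cpu()
 * below (simplified):
 *
 *	BSP					AP
 *	---					--
 *	clear cpu_initialized_mask
 *	send INIT/INIT/STARTUP	  --->		start_secondary()
 *	wait for cpu_initialized_mask		cpu_init() marks itself in
 *						cpu_initialized_mask, then
 *	set cpu_callout_mask	  --->		waits for cpu_callout_mask
 *	wait for cpu_callin_mask		smp_callin() sets
 *						cpu_callin_mask
 */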
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack = idle->thread.sp;

	/* Enable the espfix hack for this CPU */
	init_espfix_ap(cpu);

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	if (x86_platform.legacy.warm_reset) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined.
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for the first sign of life from the AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait until the AP completes its initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (x86_platform.legacy.warm_reset) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	return boot_error;
}

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err, ret = 0;

	lockdep_assert_irqs_enabled();

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;

	common_cpu_up(cpu, tidle);

	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		ret = -EIO;
		goto unreg_nmi;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

unreg_nmi:
	/*
	 * Clean up the NMI handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}
/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
}

/*
 * Various sanity checks.
 */
static void __init smp_sanity_check(void)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

static void __init smp_get_logical_apicid(void)
{
	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}
/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            kept for common interface support.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology, this matches default_topology() in that
	 * it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() is run.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);

	smp_sanity_check();

	switch (apic_intr_mode) {
	case APIC_PIC:
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		disable_smp();
		return;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		disable_smp();
		/* Setup local timer */
		x86_init.timers.setup_percpu_clockev();
		return;
	case APIC_VIRTUAL_WIRE:
	case APIC_SYMMETRIC_IO:
		break;
	}

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();

	smp_get_logical_apicid();

	pr_info("CPU0: ");
	print_cpu_info(&cpu_data(0));

	native_pv_lock_init();

	uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init calculate_max_logical_packages(void)
{
	int ncpus;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems so
	 * extrapolate the boot cpu's data to all packages.
	 */
	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
	pr_info("Max logical packages: %u\n", __max_logical_packages);
}
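/*
 * Worked example for calculate_max_logical_packages() above
 * (hypothetical machine): with booted_cores == 28 on the boot CPU's
 * package and topology_max_smt_threads() == 2, ncpus == 56; with
 * nr_cpu_ids == 112, __max_logical_packages = DIV_ROUND_UP(112, 56)
 * == 2.
 */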
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	calculate_max_logical_packages();

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);


/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data-structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * If cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No boot processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		if (boot_cpu_has(X86_FEATURE_APIC)) {
			int apicid = boot_cpu_physical_apicid;
			int cpu = hard_smp_processor_id();

			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

			/* Make sure boot cpu is enumerated */
			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
			    apic->apic_id_valid(apicid))
				generic_processor_info(apicid, boot_cpu_apic_version);
		}

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	nr_cpu_ids = possible;

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	c->cpu_core_id = 0;
	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}
void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
	lapic_offline();
}

int native_cpu_disable(void)
{
	int ret;

	ret = lapic_can_unplug_cpu();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;
	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors. The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series. It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}
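/*
 * Hint arithmetic in mwait_play_dead() above, with illustrative
 * values: if the deepest non-zero sub-state field found by the scan
 * sits at index i == 2 with a MWAIT_SUBSTATE_MASK value of 2, then
 * highest_cstate == 2 and highest_subcstate == 2, so the MWAIT hint
 * becomes eax == (2 << MWAIT_SUBSTATE_SIZE) | (2 - 1) == 0x21.
 */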
void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif