// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj	:	CPU hotplug support
 *	Glauber Costa	:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* representing HT, core, and die siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
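/*
 * The masks above nest: cpu_sibling_map (the SMT threads of one core) is
 * contained in cpu_die_map (threads on the same die), which is contained
 * in cpu_core_map (threads in the same physical package).
 * cpu_llc_shared_map groups CPUs sharing a last-level cache and can cut
 * across NUMA boundaries on SNC parts; see match_llc() below.
 */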
/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static unsigned int logical_die __read_mostly;

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
	int retval = x86_topology_update;

	x86_topology_update = false;
	return retval;
}

static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

static void init_freq_invariance(bool secondary);

/*
 * Report back to the Boot Processor during boot time or to the caller
 * processor during CPU online.
 */
static void smp_callin(void)
{
	int cpuid;

	/*
	 * If woken up by an INIT in an 82489DX configuration,
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * The topology information must be up to date before
	 * calibrate_delay() and notify_cpu_starting().
	 */
	set_cpu_sibling_map(raw_smp_processor_id());

	init_freq_invariance(true);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;
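/*
 * A rough sketch of the AP bring-up handshake implemented in this file
 * (see do_boot_cpu() and smp_callin()): the AP marks itself in
 * cpu_initialized_mask from cpu_init(), the BSP acknowledges by setting
 * cpu_callout_mask, and the AP finally reports in via cpu_callin_mask,
 * on which the BSP spins in do_boot_cpu().
 */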
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * before cpu_init(). SMP booting is fragile enough that we want
	 * to limit the things done here to the most necessary things.
	 */
	cr4_init();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif
	cpu_init_exception_handling();
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
	return smp_num_siblings > 1;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->phys_proc_id == phys_pkg)
			return c->logical_proc_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

/**
 * topology_phys_to_logical_die - Map a physical die id to logical
 *
 * Returns logical die id or -1 if not found
 */
int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
{
	int cpu;
	int proc_id = cpu_data(cur_cpu).phys_proc_id;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->cpu_die_id == die_id &&
		    c->phys_proc_id == proc_id)
			return c->logical_die_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_die);
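/*
 * Example of the mapping maintained below: on a two-socket system whose
 * firmware enumerates the packages with physical ids 0 and 3, the first
 * CPU of each package to come up gets logical package ids 0 and 1
 * respectively; later CPUs of the same package find the existing entry
 * via topology_phys_to_logical_pkg().
 */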
/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg:	The physical package id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_pkg(pkg);
	if (new >= 0)
		goto found;

	new = logical_packages++;
	if (new != pkg) {
		pr_info("CPU %u Converting physical %u to logical package %u\n",
			cpu, pkg, new);
	}
found:
	cpu_data(cpu).logical_proc_id = new;
	return 0;
}

/**
 * topology_update_die_map - Update the physical to logical die map
 * @die:	The die id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_die_map(unsigned int die, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_die(die, cpu);
	if (new >= 0)
		goto found;

	new = logical_die++;
	if (new != die) {
		pr_info("CPU %u Converting physical %u to logical die %u\n",
			cpu, die, new);
	}
found:
	cpu_data(cpu).logical_die_id = new;
	return 0;
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	topology_update_package_map(c->phys_proc_id, id);
	topology_update_die_map(c->cpu_die_id, id);
	c->initialized = true;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	/* Copy boot_cpu_data only on the first bringup */
	if (!c->initialized)
		*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up an AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
	c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    c->cpu_die_id == o->cpu_die_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
			if (c->cpu_core_id == o->cpu_core_id)
				return topology_sane(c, o, "smt");

			if ((c->cu_id != 0xff) &&
			    (o->cu_id != 0xff) &&
			    (c->cu_id == o->cu_id))
				return topology_sane(c, o, "smt");
		}

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_die_id == o->cpu_die_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}
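/*
 * Note on the TOPOEXT path above: X86_FEATURE_TOPOEXT is the AMD topology
 * extension. The cu_id test treats threads of the same compute unit
 * (clustered Bulldozer-style cores, cu_id != 0xff) as SMT siblings even
 * when their cpu_core_id differs.
 */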
/*
 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
 *
 * These are Intel CPUs that enumerate an LLC that is shared by
 * multiple NUMA nodes. The LLC on these systems is shared for
 * off-package data access but private to the NUMA node (half
 * of the package) for on-package access.
 *
 * CPUID (the source of the information about the LLC) can only
 * enumerate the cache as being shared *or* unshared, but not
 * this particular configuration. The CPU in this case enumerates
 * the cache to be shared across the entire package (spanning both
 * NUMA nodes).
 */
static const struct x86_cpu_id snc_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
	{}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	/* Do not match if we do not have a valid APICID for cpu: */
	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
		return false;

	/* Do not match if LLC id does not match: */
	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
		return false;

	/*
	 * Allow the SNC topology without warning. Return of false
	 * means 'c' does not share the LLC of 'o'. This will be
	 * reflected to userspace.
	 */
	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
		return false;

	return topology_sane(c, o, "llc");
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node. If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if ((c->phys_proc_id == o->phys_proc_id) &&
	    (c->cpu_die_id == o->cpu_die_id))
		return true;
	return false;
}

#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
	return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
	return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
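/*
 * The two tables above differ only in the final DIE level:
 * x86_numa_in_package_topology drops it, since on systems with NUMA nodes
 * inside a package the NUMA levels built by the generic scheduler code
 * take its place. native_smp_cpus_done() switches to it when
 * x86_has_numa_in_package is set.
 */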
/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;

void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);
	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_pkg(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;

		if ((i == cpu) || (has_mp && match_die(c, o)))
			link_mask(topology_die_cpumask, cpu, i);
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}
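/*
 * Worked example for the BogoMIPS arithmetic above: one BogoMIPS is
 * 500000 delay-loop iterations per second and loops_per_jiffy counts
 * iterations per tick, so bogosum * HZ / 500000 == bogosum / (500000 / HZ)
 * yields the integer part, while bogosum / (5000 / HZ) % 100 recovers the
 * two decimal places. With HZ=250 and a bogosum of 12,000,000 this prints
 * "6000.00 BogoMIPS".
 */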
void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to over-ride this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us,
 * and this won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	u32 dm = apic->irq_dest_mode ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | dm, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
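/*
 * The classic MP-spec wakeup implemented below, in outline: assert INIT
 * (level-triggered) on the target, wait, deassert INIT, then send up to
 * two STARTUP IPIs whose vector field carries the page number of the
 * real-mode entry point (start_eip >> 12). Only integrated APICs get the
 * STARTUP IPIs; external 82489DX APICs are woken by INIT alone.
 */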
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = NUMA_NO_NODE;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state < SYSTEM_RUNNING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs: ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
	       int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by NMI.
	 *
	 * Register an NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->irq_dest_mode)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;
	cpu_init_stack_canary(cpu, idle);

	/* Initialize the interrupt stack(s) */
	ret = irq_init_percpu_irqstack(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
	initial_gs = per_cpu_offset(cpu);
#endif
	return 0;
}
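/*
 * The globals set up in do_boot_cpu() below (initial_code, initial_stack,
 * early_gdt_descr) are consumed by the real-mode trampoline and the kernel
 * entry stubs once the freshly kicked AP switches out of real mode and
 * jumps into start_secondary().
 */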
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack = idle->thread.sp;

	/* Enable the espfix hack for this CPU */
	init_espfix_ap(cpu);

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	if (x86_platform.legacy.warm_reset) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	if (x86_platform.legacy.warm_reset) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	return boot_error;
}
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err, ret = 0;

	lockdep_assert_irqs_enabled();

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;

	err = common_cpu_up(cpu, tidle);
	if (err)
		return err;

	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		ret = -EIO;
		goto unreg_nmi;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

unreg_nmi:
	/*
	 * Clean up the NMI handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
	cpumask_set_cpu(0, topology_die_cpumask(0));
}

/*
 * Various sanity checks.
 */
static void __init smp_sanity_check(void)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

static void __init smp_get_logical_apicid(void)
{
	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            for common interface support.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology, this matches default_topology() in that
	 * it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() runs.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);
	init_freq_invariance(false);
	smp_sanity_check();

	switch (apic_intr_mode) {
	case APIC_PIC:
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		disable_smp();
		return;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		disable_smp();
		/* Setup local timer */
		x86_init.timers.setup_percpu_clockev();
		return;
	case APIC_VIRTUAL_WIRE:
	case APIC_SYMMETRIC_IO:
		break;
	}

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();

	smp_get_logical_apicid();

	pr_info("CPU0: ");
	print_cpu_info(&cpu_data(0));

	uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
}

void arch_thaw_secondary_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_thaw_secondary_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
	native_pv_lock_init();
}
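/*
 * Worked example for calculate_max_logical_packages() below: on a
 * two-socket machine with 10 booted cores per package and SMT2,
 * ncpus = 10 * 2 = 20 and total_cpus = 40, so
 * __max_logical_packages = DIV_ROUND_UP(40, 20) = 2.
 */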
void __init calculate_max_logical_packages(void)
{
	int ncpus;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems so
	 * extrapolate the boot cpu's data to all packages.
	 */
	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	pr_info("Max logical packages: %u\n", __max_logical_packages);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	calculate_max_logical_packages();

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

/*
 * cpu_possible_mask should be static, it cannot change as cpu's
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * In case when cpu_hotplug is not compiled, then we resort to current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No boot processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		if (boot_cpu_has(X86_FEATURE_APIC)) {
			int apicid = boot_cpu_physical_apicid;
			int cpu = hard_smp_processor_id();

			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

			/* Make sure boot cpu is enumerated */
			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
			    apic->apic_id_valid(apicid))
				generic_processor_info(apicid, boot_cpu_apic_version);
		}

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	nr_cpu_ids = possible;

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}
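/*
 * Example of the sizing above: with 4 processors enumerated in the MADT,
 * 4 more marked disabled (hotpluggable), no possible_cpus= override and
 * CPU hotplug enabled, possible = 4 + 4 = 8 and total_cpus = 8; the boot
 * log then reads "Allowing 8 CPUs, 4 hotplug CPUs".
 */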
#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu(cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_die_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	cpumask_clear(topology_die_cpumask(cpu));
	c->cpu_core_id = 0;
	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
	lapic_offline();
}

int native_cpu_disable(void)
{
	int ret;

	ret = lapic_can_unplug_cpu();
	if (ret)
		return ret;

	cpu_disable_common();

	/*
	 * Disable the local APIC. Otherwise IPI broadcasts will reach
	 * it. It still responds normally to INIT, NMI, SMI, and SIPI
	 * messages.
	 *
	 * Disabling the APIC must happen after cpu_disable_common()
	 * which invokes fixup_irqs().
	 *
	 * Disabling the APIC preserves already set bits in IRR, but
	 * an interrupt arriving after disabling the local APIC does not
	 * set the corresponding IRR bit.
	 *
	 * fixup_irqs() scans IRR for set bits so it can raise a not
	 * yet handled interrupt on the new destination CPU via an IPI
	 * but obviously it can't do so for IRR bits which are not set.
	 * IOW, interrupts arriving after disabling the local APIC will
	 * be lost.
	 */
	apic_soft_disable();

	return 0;
}
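/*
 * Teardown handshake used below: the dying CPU reports in from
 * play_dead_common() via cpu_report_death(), and the CPU tearing it
 * down waits for that in common_cpu_die() via cpu_wait_death() with a
 * 5 second timeout.
 */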
int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return;
	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors. The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series. It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}
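/*
 * The cascade in native_play_dead() above tries the cheapest
 * wakeup-capable idle state first: MWAIT with the deepest enumerated
 * C-state, then a cpuidle driver's play_dead hook, and finally plain
 * HLT if both are unavailable.
 */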
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif

#ifdef CONFIG_X86_64
/*
 * APERF/MPERF frequency ratio computation.
 *
 * The scheduler wants to do frequency invariant accounting and needs a <1
 * ratio to account for the 'current' frequency, corresponding to
 * freq_curr / freq_max.
 *
 * Since the frequency freq_curr on x86 is controlled by micro-controller and
 * our P-state setting is little more than a request/hint, we need to observe
 * the effective frequency 'BusyMHz', i.e. the average frequency over a time
 * interval after discarding idle time. This is given by:
 *
 *            BusyMHz = delta_APERF / delta_MPERF * freq_base
 *
 * where freq_base is the max non-turbo P-state.
 *
 * The freq_max term has to be set to a somewhat arbitrary value, because we
 * can't know which turbo states will be available at a given point in time:
 * it all depends on the thermal headroom of the entire package. We set it to
 * the turbo level with 4 cores active.
 *
 * Benchmarks show that's a good compromise between the 1C turbo ratio
 * (freq_curr/freq_max would rarely reach 1) and something close to freq_base,
 * which would ignore the entire turbo range (a conspicuous part, making
 * freq_curr/freq_max always maxed out).
 *
 * An exception to the heuristic above is the Atom uarch, where we choose the
 * highest turbo level for freq_max since Atom's are generally oriented towards
 * power efficiency.
 *
 * Setting freq_max to anything less than the 1C turbo ratio makes the ratio
 * freq_curr / freq_max eventually grow >1, in which case we clip it to 1.
 */

DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);

static DEFINE_PER_CPU(u64, arch_prev_aperf);
static DEFINE_PER_CPU(u64, arch_prev_mperf);
static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;
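/*
 * Worked example of the ratios above (illustrative numbers): with
 * freq_base = 2.0 GHz and a 4C turbo of 3.0 GHz,
 * arch_turbo_freq_ratio = 3.0/2.0 * SCHED_CAPACITY_SCALE = 1536.
 * If a tick window then shows delta_APERF/delta_MPERF = 1.2 (a BusyMHz of
 * 2.4 GHz), arch_scale_freq_tick() below computes
 * freq_scale = 1.2 * 1024^2 / 1536 = 819, i.e. 2.4/3.0 of capacity.
 */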
void arch_set_max_freq_ratio(bool turbo_disabled)
{
	arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
					arch_turbo_freq_ratio;
}

static bool turbo_disabled(void)
{
	u64 misc_en;
	int err;

	err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
	if (err)
		return false;

	return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}

static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
	int err;

	err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 16) & 0x3F;	/* max P state */
	*turbo_freq = *turbo_freq & 0x3F;	/* 1C turbo    */

	return true;
}

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define X86_MATCH(model)					\
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)

static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
	X86_MATCH(XEON_PHI_KNL),
	X86_MATCH(XEON_PHI_KNM),
	{}
};

static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
	X86_MATCH(SKYLAKE_X),
	{}
};

static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
	X86_MATCH(ATOM_GOLDMONT),
	X86_MATCH(ATOM_GOLDMONT_D),
	X86_MATCH(ATOM_GOLDMONT_PLUS),
	{}
};

static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
				   int num_delta_fratio)
{
	int fratio, delta_fratio, found;
	int err, i;
	u64 msr;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;	/* max P state */

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
	if (err)
		return false;

	fratio = (msr >> 8) & 0xFF;
	i = 16;
	found = 0;
	do {
		if (found >= num_delta_fratio) {
			*turbo_freq = fratio;
			return true;
		}

		delta_fratio = (msr >> (i + 5)) & 0x7;

		if (delta_fratio) {
			found += 1;
			fratio -= delta_fratio;
		}

		i += 8;
	} while (i < 64);

	return true;
}

static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
{
	u64 ratios, counts;
	u32 group_size;
	int err, i;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;	/* max P state */

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
	if (err)
		return false;

	for (i = 0; i < 64; i += 8) {
		group_size = (counts >> i) & 0xFF;
		if (group_size >= size) {
			*turbo_freq = (ratios >> i) & 0xFF;
			return true;
		}
	}

	return false;
}

static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
	u64 msr;
	int err;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;	/* max P state */
	*turbo_freq = (msr >> 24) & 0xFF;	/* 4C turbo    */

	/* The CPU may have less than 4 cores */
	if (!*turbo_freq)
		*turbo_freq = msr & 0xFF;	/* 1C turbo    */

	return true;
}
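/*
 * Note for the helpers above: base_freq and turbo_freq are raw ratio
 * fields from the MSRs; only their quotient matters, which
 * intel_set_max_freq_ratio() below scales by SCHED_CAPACITY_SCALE, so no
 * conversion to Hz is ever needed.
 */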
static bool intel_set_max_freq_ratio(void)
{
	u64 base_freq, turbo_freq;
	u64 turbo_ratio;

	if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
		goto out;

	if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
		goto out;

	if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
	    knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
		goto out;

	if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
		goto out;

	if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
		goto out;

	return false;

out:
	/*
	 * Some hypervisors advertise X86_FEATURE_APERFMPERF
	 * but then fill all MSR's with zeroes.
	 * Some CPUs have turbo boost but don't declare any turbo ratio
	 * in MSR_TURBO_RATIO_LIMIT.
	 */
	if (!base_freq || !turbo_freq) {
		pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
		return false;
	}

	turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
	if (!turbo_ratio) {
		pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
		return false;
	}

	arch_turbo_freq_ratio = turbo_ratio;
	arch_set_max_freq_ratio(turbo_disabled());

	return true;
}

static void init_counter_refs(void)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	this_cpu_write(arch_prev_aperf, aperf);
	this_cpu_write(arch_prev_mperf, mperf);
}

static void init_freq_invariance(bool secondary)
{
	bool ret = false;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	if (secondary) {
		if (static_branch_likely(&arch_scale_freq_key)) {
			init_counter_refs();
		}
		return;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		ret = intel_set_max_freq_ratio();

	if (ret) {
		init_counter_refs();
		static_branch_enable(&arch_scale_freq_key);
	} else {
		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
	}
}

static void disable_freq_invariance_workfn(struct work_struct *work)
{
	static_branch_disable(&arch_scale_freq_key);
}

static DECLARE_WORK(disable_freq_invariance_work,
		    disable_freq_invariance_workfn);

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
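/*
 * arch_scale_freq_tick() below folds the per-tick APERF/MPERF deltas into
 * arch_freq_scale. The overflow checks only trip if the counters jump by
 * absurd amounts between two ticks; recovery is deferred to a workqueue
 * because static keys cannot be toggled from tick context.
 */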
void arch_scale_freq_tick(void)
{
	u64 freq_scale = SCHED_CAPACITY_SCALE;
	u64 aperf, mperf;
	u64 acnt, mcnt;

	if (!arch_scale_freq_invariant())
		return;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	acnt = aperf - this_cpu_read(arch_prev_aperf);
	mcnt = mperf - this_cpu_read(arch_prev_mperf);

	this_cpu_write(arch_prev_aperf, aperf);
	this_cpu_write(arch_prev_mperf, mperf);

	if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
		goto error;

	if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
		goto error;

	freq_scale = div64_u64(acnt, mcnt);
	if (!freq_scale)
		goto error;

	if (freq_scale > SCHED_CAPACITY_SCALE)
		freq_scale = SCHED_CAPACITY_SCALE;

	this_cpu_write(arch_freq_scale, freq_scale);
	return;

error:
	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
	schedule_work(&disable_freq_invariance_work);
}
#else
static inline void init_freq_invariance(bool secondary)
{
}
#endif /* CONFIG_X86_64 */