#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html
 * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue Date: August 1998)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * General Systems BIOSen alias the cpu frequency registers
	 * of the Elan at 0x000df000. Unfortunately, one of the Linux
	 * drivers subsequently pokes it, and changes the CPU speed.
	 * Workaround : Remove the unneeded alias.
	 */
#define CBAR		(0xfffc)	/* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15 needs to be 0 to enable SSE
	 * on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Is this call coming from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
	    (c->x86_stepping == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_stepping == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running an SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}


static void amd_get_topology_early(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		node_id = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu, node_id);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

u16 amd_get_nb_id(int cpu)
{
	return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems to be very
		 * little benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *	      SEV feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32 bits, this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		clear_cpu_cap(c, X86_FEATURE_SME);
clear_sev:
		clear_cpu_cap(c, X86_FEATURE_SEV);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

#ifdef CONFIG_X86_32
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_K7);
#endif

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL. VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	amd_get_topology_early(c);
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	set_cpu_cap(c, X86_FEATURE_ZEN);

	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
	if (!cpu_has(c, X86_FEATURE_CPB))
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without an XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		unsigned long long val;
		int ret;

		/*
		 * A serializing LFENCE has less overhead than MFENCE, so
		 * use it for execution serialization. On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_F10H_DECFG,
			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

		/*
		 * Verify that the MSR write was successful (could be running
		 * under a hypervisor) and only then assume that LFENCE is
		 * serializing.
		 */
		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
			/* A serializing LFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
		} else {
			/* MFENCE stops RDTSC speculation */
			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
		}
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

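/*
 * Range encoding, as implemented by the macros below: each AMD_MODEL_RANGE()
 * packs the family into bits 31-24 and the inclusive (model << 4 | stepping)
 * start and end bounds into bits 23-12 and 11-0; the AMD_MODEL_RANGE_*()
 * helpers extract them again for the comparison in cpu_has_amd_erratum().
 */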
#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));


static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

/* Program a debug register address mask via the breakpoint extension MSRs. */
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}