/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002	     Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at  rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>
#include <linux/rcupdate.h>

#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"

/* arch_phys_wc_add returns an MTRR register index plus this offset. */
#define MTRR_TO_PHYS_WC_OFFSET 1000

u32 num_var_ranges;
static bool __mtrr_enabled;

static bool mtrr_enabled(void)
{
	return __mtrr_enabled;
}

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;

static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

const struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
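
/*
 * Illustrative sketch (not part of this file): each vendor-specific driver
 * registers its operations from its init routine, roughly along the lines
 * of
 *
 *	void __init amd_init_mtrr(void)
 *	{
 *		set_mtrr_ops(&amd_mtrr_ops);
 *	}
 *
 * The exact name of the ops structure is the vendor driver's business;
 * init_ifs() further down only calls the per-vendor init routines.
 */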

/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
		    dev->revision <= 5) {
			pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		/*
		 * Intel 450NX errata # 23. Non-ascending cacheline evictions
		 * to write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}

/* This function sets num_var_ranges, the number of variable MTRRs. */
static void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel())
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD) || is_cpu(HYGON))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}

struct set_mtrr_data {
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

/**
 * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
 * by all the CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns zero.
 */
static int mtrr_rendezvous_handler(void *info)
{
	struct set_mtrr_data *data = info;

	/*
	 * We use this same function to initialize the mtrrs during boot,
	 * resume, runtime cpu online and on an explicit request to set a
	 * specific MTRR.
	 *
	 * During boot or suspend, the state of the boot cpu's mtrrs has been
	 * saved, and we want to replicate that across all the cpus that come
	 * online (either at the end of boot or resume or during a runtime cpu
	 * online). If we're doing that, @reg is set to something special and
	 * on all the cpus we do mtrr_if->set_all() (On the logical cpu that
	 * started the boot/resume sequence, this might be a duplicate
	 * set_all()).
	 */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
		mtrr_if->set_all();
	}
	return 0;
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}
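
/*
 * Illustrative reading of the rule above: an UNCACHABLE region may safely
 * overlap a region of any other type (the effective type of the overlap is
 * UC), and WRTHROUGH may overlap WRBACK in either order. Other combinations,
 * e.g. WRCOMB over WRBACK, are rejected by mtrr_add_page() below.
 */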

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, stop_machine() will ensure that
 * the rendezvous handler is started on each CPU. In lockstep they then
 * perform the state transition: disable interrupts, update the MTRRs
 * (each CPU vendor may do this differently, so we call the mtrr_if->set()
 * callback and let it take care of the details) and re-enable interrupts.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
				       unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
				       cpu_callout_mask);
}

/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
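 *
 * Example (illustrative sketch only; the base address and size below are
 * made up):
 *
 *	int reg = mtrr_add_page(0xf8000000UL >> PAGE_SHIFT,
 *				0x400000UL >> PAGE_SHIFT,
 *				MTRR_TYPE_WRCOMB, true);
 *	if (reg >= 0) {
 *		... use the write-combined range, then later ...
 *		mtrr_del_page(reg, 0, 0);
 *	}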
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_enabled())
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warn("type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warn("your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		pr_warn("zero sized request\n");
		return -EINVAL;
	}

	if ((base | (base + size - 1)) >>
	    (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
		pr_warn("base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	cpus_read_lock();

	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n",
				base, size, lbase, lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n",
				base, size, mtrr_attrib_to_str(ltype),
				mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr_cpuslocked(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr_cpuslocked(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("no more MTRRs available\n");
	}
	error = i;
out:
	mutex_unlock(&mtrr_mutex);
	cpus_read_unlock();
	return error;
}

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warn("size and base must be multiples of 4 kiB\n");
		pr_debug("size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}
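
/*
 * For example (illustrative values): base = 0xd0000000 with size = 0x100000
 * passes the check above, while base = 0xd0000800 or size = 0x800 would be
 * rejected because they are not multiples of PAGE_SIZE (4 kiB).
 */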

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (!mtrr_enabled())
		return -ENODEV;
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}

/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_enabled())
		return -ENODEV;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	cpus_read_lock();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warn("register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warn("MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warn("reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr_cpuslocked(reg, 0, 0, 0);
	error = reg;
out:
	mutex_unlock(&mtrr_mutex);
	cpus_read_unlock();
	return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
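 *
 * Example (illustrative; fb_base and fb_size are hypothetical driver
 * variables): a driver that earlier did
 *
 *	reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
 *
 * would normally release the region with
 *
 *	mtrr_del(reg, 0, 0);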
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (!mtrr_enabled())
		return -ENODEV;
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

/**
 * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
 * @base: Physical base address
 * @size: Size of region
 *
 * If PAT is available, this does nothing. If PAT is unavailable, it
 * attempts to add a WC MTRR covering size bytes starting at base and
 * logs an error if this fails.
 *
 * The caller should provide a power of two size on an equivalent
 * power of two boundary.
 *
 * Drivers must store the return value to pass to arch_phys_wc_del(),
 * but drivers should not try to interpret that return value.
 */
int arch_phys_wc_add(unsigned long base, unsigned long size)
{
	int ret;

	if (pat_enabled() || !mtrr_enabled())
		return 0;  /* Success! (We don't need to do anything.) */

	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
	if (ret < 0) {
		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.\n",
			(void *)base, (void *)(base + size - 1));
		return ret;
	}
	return ret + MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL(arch_phys_wc_add);

/*
 * arch_phys_wc_del - undoes arch_phys_wc_add
 * @handle: Return value from arch_phys_wc_add
 *
 * This cleans up after arch_phys_wc_add().
 *
 * The API guarantees that arch_phys_wc_del(error code) and
 * arch_phys_wc_del(0) do nothing.
 */
void arch_phys_wc_del(int handle)
{
	if (handle >= 1) {
		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
	}
}
EXPORT_SYMBOL(arch_phys_wc_del);

/*
 * arch_phys_wc_index - translates arch_phys_wc_add's return value
 * @handle: Return value from arch_phys_wc_add
 *
 * This will turn the return value from arch_phys_wc_add into an mtrr
 * index suitable for debugging.
 *
 * Note: There is no legitimate use for this function, except possibly
 * in a printk line. Alas there is an illegitimate use in some ancient
 * drm ioctls.
 */
int arch_phys_wc_index(int handle)
{
	if (handle < MTRR_TO_PHYS_WC_OFFSET)
		return -1;
	else
		return handle - MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);
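
/*
 * Illustrative use of the three helpers above (bar_start, bar_len and
 * wc_cookie are hypothetical driver variables):
 *
 *	wc_cookie = arch_phys_wc_add(bar_start, bar_len);
 *	... map and use the region ...
 *	arch_phys_wc_del(wc_cookie);
 *
 * Since arch_phys_wc_del() ignores zero and negative handles, callers need
 * not check the cookie before freeing it.
 */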

/*
 * HACK ALERT!
 * These should be called implicitly, but we can't yet until all the initcall
 * stuff is done...
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}

/*
 * The suspend/resume methods are only for CPUs without MTRR. CPUs using the
 * generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
			     &mtrr_value[i].lsize,
			     &mtrr_value[i].ltype);
	}
	return 0;
}

static void mtrr_restore(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				 mtrr_value[i].lsize,
				 mtrr_value[i].ltype);
		}
	}
}

static struct syscore_ops mtrr_syscore_ops = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;

#define SIZE_OR_MASK_BITS(n)	(~((1ULL << ((n) - PAGE_SHIFT)) - 1))
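
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12 and a 36-bit physical
 * address space, SIZE_OR_MASK_BITS(36) == ~((1ULL << 24) - 1)
 * == 0xffffffffff000000ULL. The masks operate on values expressed in 4 kB
 * pages, so bits 24 and above correspond to physical address bits 36 and up.
 */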

/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	phys_addr = 32;

	if (boot_cpu_has(X86_FEATURE_MTRR)) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = SIZE_OR_MASK_BITS(36);
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD-specific CPUID extended function, but we
		 * assume (hope?) that Intel will implement it too when they
		 * extend the address bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_stepping == 0x3 ||
			     boot_cpu_data.x86_stepping == 0x4))
				phys_addr = 36;

			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * VIA C* family have Intel style MTRRs,
			 * but don't support PAE
			 */
			size_or_mask = SIZE_OR_MASK_BITS(32);
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		__mtrr_enabled = true;
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			/* BIOS may override */
			__mtrr_enabled = get_mtrr_state();

			if (mtrr_enabled())
				mtrr_bp_pat_init();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}

	if (!mtrr_enabled()) {
		pr_info("Disabled\n");

		/*
		 * PAT initialization relies on MTRR's rendezvous handler.
		 * Skip PAT init until the handler can initialize both
		 * features independently.
		 */
		pat_disable("MTRRs disabled, skipping PAT initialization too.");
	}
}

void mtrr_ap_init(void)
{
	if (!mtrr_enabled())
		return;

	if (!use_intel() || mtrr_aps_delayed_init)
		return;

	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
	 * being changed, but this routine is called at CPU boot time and
	 * holding the lock would break that.
	 *
	 * This routine is called in two cases:
	 *
	 * 1. very early in software resume, when there are absolutely no
	 *    MTRR entry changes;
	 *
	 * 2. CPU hot-add time. We let mtrr_add/del_page hold the CPU hotplug
	 *    lock to prevent MTRR entry changes.
	 */
	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
}

/**
 * mtrr_save_state - Save current fixed-range MTRR state of the first
 * cpu in cpu_online_mask.
 */
void mtrr_save_state(void)
{
	int first_cpu;

	if (!mtrr_enabled())
		return;

	first_cpu = cpumask_first(cpu_online_mask);
	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}

void set_mtrr_aps_delayed_init(void)
{
	if (!mtrr_enabled())
		return;
	if (!use_intel())
		return;

	mtrr_aps_delayed_init = true;
}

/*
 * Delayed MTRR initialization for all APs
 */
void mtrr_aps_init(void)
{
	if (!use_intel() || !mtrr_enabled())
		return;

	/*
	 * Check if someone has requested the delay of AP MTRR initialization,
	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
	 * then we are done.
	 */
	if (!mtrr_aps_delayed_init)
		return;

	set_mtrr(~0U, 0, 0, 0);
	mtrr_aps_delayed_init = false;
}

void mtrr_bp_restore(void)
{
	if (!use_intel() || !mtrr_enabled())
		return;

	mtrr_if->set_all();
}

static int __init mtrr_init_finalize(void)
{
	if (!mtrr_enabled())
		return 0;

	if (use_intel()) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPU has no MTRR and seems not to support SMP. These CPUs have
	 * vendor-specific drivers; we use a tricky method to support
	 * suspend/resume for them.
	 *
	 * TBD: is there any system with such CPU which supports
	 * suspend/resume? If no, we should remove the code.
	 */
	register_syscore_ops(&mtrr_syscore_ops);

	return 0;
}
subsys_initcall(mtrr_init_finalize);