/* Generic MTRR (Memory Type Range Register) driver.

   Copyright (C) 1997-2000  Richard Gooch
   Copyright (c) 2002	    Patrick Mochel

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with this library; if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Richard Gooch may be reached by email at rgooch@atnf.csiro.au
   The postal address is:
     Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

   Source: "Pentium Pro Family Developer's Manual, Volume 3:
   Operating System Writer's Guide" (Intel document number 242692),
   section 11.11.7

   This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
   on 6-7 March 2002.
   Source: Intel Architecture Software Developers Manual, Volume 3:
   System Programming Guide; Section 9.11. (1997 edition - PPro).
*/

#include <linux/types.h> /* FIXME: kvm_para.h needs this */

#include <linux/stop_machine.h>
#include <linux/kvm_para.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/sort.h>
#include <linux/cpu.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>
#include <linux/rcupdate.h>

#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"

/* arch_phys_wc_add returns an MTRR register index plus this offset. */
#define MTRR_TO_PHYS_WC_OFFSET 1000

u32 num_var_ranges;

static bool mtrr_enabled(void)
{
	return !!mtrr_if;
}

unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
static DEFINE_MUTEX(mtrr_mutex);

u64 size_or_mask, size_and_mask;

const struct mtrr_ops *mtrr_if;

/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb(void)
{
	struct pci_dev *dev;

	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
	if (dev != NULL) {
		/*
		 * ServerWorks LE chipsets < rev 6 have problems with
		 * write-combining. Don't allow it and leave room for other
		 * chipsets to be tagged.
		 */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
		    dev->revision <= 5) {
			pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		/*
		 * Intel 450NX erratum #23. Non-ascending cacheline evictions to
		 * write-combining memory may result in data corruption.
		 */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			pr_info("Intel 450NX MMC detected. Write-combining disabled.\n");
			pci_dev_put(dev);
			return 0;
		}
		pci_dev_put(dev);
	}
	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
}
/* This function sets num_var_ranges, the number of variable MTRRs. */
static void __init set_num_var_ranges(bool use_generic)
{
	unsigned long config = 0, dummy;

	if (use_generic)
		rdmsr(MSR_MTRRcap, config, dummy);
	else if (is_cpu(AMD) || is_cpu(HYGON))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;

	num_var_ranges = config & 0xff;
}

static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	for (i = 0; i < max; i++)
		mtrr_usage_table[i] = 1;
}

struct set_mtrr_data {
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};

/**
 * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed
 * by all the CPUs.
 * @info: pointer to mtrr configuration data
 *
 * Returns zero (stop_machine() handlers must return an int).
 */
static int mtrr_rendezvous_handler(void *info)
{
	struct set_mtrr_data *data = info;

	mtrr_if->set(data->smp_reg, data->smp_base,
		     data->smp_size, data->smp_type);
	return 0;
}

static inline int types_compatible(mtrr_type type1, mtrr_type type2)
{
	return type1 == MTRR_TYPE_UNCACHABLE ||
	       type2 == MTRR_TYPE_UNCACHABLE ||
	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
	       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
}

/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Queue work to do the following on all processors:
 * 2. Disable Interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, stop_machine() will ensure that
 * the rendezvous handler is started on each CPU. And in lockstep they
 * do the state transition of disabling interrupts, updating the MTRRs
 * (the CPU vendors may each do it differently, so we call the mtrr_if->set()
 * callback and let them take care of it) and enabling interrupts.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
static void
set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
}

static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
				unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data = { .smp_reg = reg,
				      .smp_base = base,
				      .smp_size = size,
				      .smp_type = type
				    };

	stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
}
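/*
 * Illustration (not part of the driver logic): mtrr_add_page() below only
 * tolerates overlapping requests whose combined effective memory type is
 * unambiguous, which is what types_compatible() encodes.  For example:
 *
 *	types_compatible(MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK)  -> true
 *	types_compatible(MTRR_TYPE_WRTHROUGH,  MTRR_TYPE_WRBACK)  -> true
 *	types_compatible(MTRR_TYPE_WRCOMB,     MTRR_TYPE_WRBACK)  -> false
 *
 * An overlap between incompatible types makes mtrr_add_page() warn and
 * return -EINVAL.
 */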
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (in units of 4 kB!)
 * @size: Physical size of region in pages (4 kB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request an
 * MTRR is added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, bool increment)
{
	unsigned long lbase, lsize;
	int i, replace, error;
	mtrr_type ltype;

	if (!mtrr_enabled())
		return -ENXIO;

	error = mtrr_if->validate_add_page(base, size, type);
	if (error)
		return error;

	if (type >= MTRR_NUM_TYPES) {
		pr_warn("type: %u invalid\n", type);
		return -EINVAL;
	}

	/* If the type is WC, check that this processor supports it */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		pr_warn("your processor doesn't support write-combining\n");
		return -ENOSYS;
	}

	if (!size) {
		pr_warn("zero sized request\n");
		return -EINVAL;
	}

	if ((base | (base + size - 1)) >>
	    (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
		pr_warn("base or size exceeds the MTRR width\n");
		return -EINVAL;
	}

	error = -EINVAL;
	replace = -1;

	/* No CPU hotplug when we change MTRR entries */
	cpus_read_lock();

	/* Search for existing MTRR */
	mutex_lock(&mtrr_mutex);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (!lsize || base > lbase + lsize - 1 ||
		    base + size - 1 < lbase)
			continue;
		/*
		 * At this point we know there is some kind of
		 * overlap/enclosure
		 */
		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
			if (base <= lbase &&
			    base + size - 1 >= lbase + lsize - 1) {
				/* New region encloses an existing region */
				if (type == ltype) {
					replace = replace == -1 ? i : -2;
					continue;
				} else if (types_compatible(type, ltype))
					continue;
			}
			pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n",
				base, size, lbase, lsize);
			goto out;
		}
		/* New region is enclosed by an existing region */
		if (ltype != type) {
			if (types_compatible(type, ltype))
				continue;
			pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n",
				base, size, mtrr_attrib_to_str(ltype),
				mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++mtrr_usage_table[i];
		error = i;
		goto out;
	}
	/* Search for an empty MTRR */
	i = mtrr_if->get_free_region(base, size, replace);
	if (i >= 0) {
		set_mtrr_cpuslocked(i, base, size, type);
		if (likely(replace < 0)) {
			mtrr_usage_table[i] = 1;
		} else {
			mtrr_usage_table[i] = mtrr_usage_table[replace];
			if (increment)
				mtrr_usage_table[i]++;
			if (unlikely(replace != i)) {
				set_mtrr_cpuslocked(replace, 0, 0, 0);
				mtrr_usage_table[replace] = 0;
			}
		}
	} else {
		pr_info("no more MTRRs available\n");
	}
	error = i;
 out:
	mutex_unlock(&mtrr_mutex);
	cpus_read_unlock();
	return error;
}

static int mtrr_check(unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		pr_warn("size and base must be multiples of 4 kiB\n");
		pr_debug("size: 0x%lx base: 0x%lx\n", size, base);
		dump_stack();
		return -1;
	}
	return 0;
}

/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request an
 * MTRR is added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (!mtrr_enabled())
		return -ENODEV;
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
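/*
 * Example (illustrative sketch only, not part of this driver): a caller of
 * the legacy interface keeps the returned register number purely as a
 * cookie for the matching delete.  With hypothetical, page-aligned,
 * power-of-two sized fb_base/fb_size:
 *
 *	int reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, true);
 *
 *	if (reg >= 0) {
 *		...
 *		mtrr_del(reg, fb_base, fb_size);
 *	}
 *
 * New code should normally prefer arch_phys_wc_add()/arch_phys_wc_del()
 * below, which degrade gracefully when PAT already provides
 * write-combining.
 */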
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;
	int error = -EINVAL;

	if (!mtrr_enabled())
		return -ENODEV;

	max = num_var_ranges;
	/* No CPU hotplug when we change MTRR entries */
	cpus_read_lock();
	mutex_lock(&mtrr_mutex);
	if (reg < 0) {
		/* Search for existing MTRR */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			pr_debug("no MTRR for %lx000,%lx000 found\n",
				 base, size);
			goto out;
		}
	}
	if (reg >= max) {
		pr_warn("register: %d too big\n", reg);
		goto out;
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		pr_warn("MTRR %d not used\n", reg);
		goto out;
	}
	if (mtrr_usage_table[reg] < 1) {
		pr_warn("reg: %d has count=0\n", reg);
		goto out;
	}
	if (--mtrr_usage_table[reg] < 1)
		set_mtrr_cpuslocked(reg, 0, 0, 0);
	error = reg;
 out:
	mutex_unlock(&mtrr_mutex);
	cpus_read_unlock();
	return error;
}

/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code.
 */
int mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if (!mtrr_enabled())
		return -ENODEV;
	if (mtrr_check(base, size))
		return -EINVAL;
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}

/**
 * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
 * @base: Physical base address
 * @size: Size of region
 *
 * If PAT is available, this does nothing.  If PAT is unavailable, it
 * attempts to add a WC MTRR covering size bytes starting at base and
 * logs an error if this fails.
 *
 * The caller should provide a power of two size on an equivalent
 * power of two boundary.
 *
 * Drivers must store the return value to pass to arch_phys_wc_del(),
 * but drivers should not try to interpret that return value.
 */
int arch_phys_wc_add(unsigned long base, unsigned long size)
{
	int ret;

	if (pat_enabled() || !mtrr_enabled())
		return 0;  /* Success! (We don't need to do anything.) */

	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
	if (ret < 0) {
		pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
			(void *)base, (void *)(base + size - 1));
		return ret;
	}
	return ret + MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL(arch_phys_wc_add);
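/*
 * Example (illustrative sketch only): a driver that wants write-combined
 * access to a prefetchable BAR, with hypothetical bar_start/bar_len, would
 * typically pair the two calls and never interpret the handle:
 *
 *	int wc_cookie;
 *
 *	wc_cookie = arch_phys_wc_add(bar_start, bar_len);
 *	...
 *	arch_phys_wc_del(wc_cookie);
 *
 * This is safe whether or not PAT is available: with PAT the add returns 0
 * and the delete is a no-op; without PAT a WC MTRR is added if possible,
 * and a negative (error) handle is likewise ignored by arch_phys_wc_del().
 */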
/*
 * arch_phys_wc_del - undoes arch_phys_wc_add
 * @handle: Return value from arch_phys_wc_add
 *
 * This cleans up after arch_phys_wc_add().
 *
 * The API guarantees that arch_phys_wc_del(error code) and
 * arch_phys_wc_del(0) do nothing.
 */
void arch_phys_wc_del(int handle)
{
	if (handle >= 1) {
		WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
		mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
	}
}
EXPORT_SYMBOL(arch_phys_wc_del);

/*
 * arch_phys_wc_index - translates arch_phys_wc_add's return value
 * @handle: Return value from arch_phys_wc_add
 *
 * This will turn the return value from arch_phys_wc_add into an mtrr
 * index suitable for debugging.
 *
 * Note: There is no legitimate use for this function, except possibly
 * in printk line. Alas there is an illegitimate use in some ancient
 * drm ioctls.
 */
int arch_phys_wc_index(int handle)
{
	if (handle < MTRR_TO_PHYS_WC_OFFSET)
		return -1;
	else
		return handle - MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);

/*
 * The suspend/resume methods are only needed for CPUs without generic MTRR
 * support; CPUs using the generic MTRR driver don't require this.
 */
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];

static int mtrr_save(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &mtrr_value[i].lbase,
			     &mtrr_value[i].lsize,
			     &mtrr_value[i].ltype);
	}
	return 0;
}

static void mtrr_restore(void)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_value[i].lsize) {
			set_mtrr(i, mtrr_value[i].lbase,
				 mtrr_value[i].lsize,
				 mtrr_value[i].ltype);
		}
	}
}

static struct syscore_ops mtrr_syscore_ops = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

int __initdata changed_by_mtrr_cleanup;

#define SIZE_OR_MASK_BITS(n)	(~((1ULL << ((n) - PAGE_SHIFT)) - 1))
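/*
 * Worked example (PAGE_SHIFT == 12): for a CPU with 36 physical address
 * bits, SIZE_OR_MASK_BITS(36) == ~((1ULL << 24) - 1) == 0xffffffffff000000,
 * i.e. the 24 page-frame bits such a CPU can cover are clear and all higher
 * bits are set.  mtrr_bp_init() below pairs that with
 * size_and_mask = ~size_or_mask & 0xfffff00000 == 0x00f00000.
 */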
/**
 * mtrr_bp_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
void __init mtrr_bp_init(void)
{
	const char *why = "(not available)";
	u32 phys_addr;

	phys_addr = 32;

	if (boot_cpu_has(X86_FEATURE_MTRR)) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = SIZE_OR_MASK_BITS(36);
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		/*
		 * This is an AMD specific MSR, but we assume(hope?) that
		 * Intel will implement it too when they extend the address
		 * bus of the Xeon.
		 */
		if (cpuid_eax(0x80000000) >= 0x80000008) {
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/* CPUID workaround for Intel 0F33/0F34 CPU */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_stepping == 0x3 ||
			     boot_cpu_data.x86_stepping == 0x4))
				phys_addr = 36;

			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/*
			 * VIA C* family have Intel style MTRRs,
			 * but don't support PAE
			 */
			size_or_mask = SIZE_OR_MASK_BITS(32);
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = &amd_mtrr_ops;
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
				mtrr_if = &centaur_mtrr_ops;
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
				mtrr_if = &cyrix_mtrr_ops;
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_enabled()) {
		set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
		init_table();
		if (mtrr_if == &generic_mtrr_ops) {
			/* BIOS may override */
			if (get_mtrr_state()) {
				memory_caching_control |= CACHE_MTRR;
				changed_by_mtrr_cleanup = mtrr_cleanup(phys_addr);
			} else {
				mtrr_if = NULL;
				why = "by BIOS";
			}
		}
	}

	if (!mtrr_enabled())
		pr_info("MTRRs disabled %s\n", why);
}

/**
 * mtrr_save_state - Save current fixed-range MTRR state of the first
 *	cpu in cpu_online_mask.
 */
void mtrr_save_state(void)
{
	int first_cpu;

	if (!mtrr_enabled())
		return;

	first_cpu = cpumask_first(cpu_online_mask);
	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
}

static int __init mtrr_init_finialize(void)
{
	if (!mtrr_enabled())
		return 0;

	if (memory_caching_control & CACHE_MTRR) {
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	/*
	 * The CPU has no generic MTRR support and does not seem to support
	 * SMP. Such CPUs have vendor-specific drivers, so we use a tricky
	 * method to support suspend/resume for them.
	 *
	 * TBD: is there any system with such CPU which supports
	 * suspend/resume? If no, we should remove the code.
	 */
	register_syscore_ops(&mtrr_syscore_ops);

	return 0;
}
subsys_initcall(mtrr_init_finialize);