/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
 */
#define DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one  64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two  16k MTRRs */
	{ MSR_MTRRfix4K_C0000, 8 },  /* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}
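
/*
 * Note (explanatory, not from the original source): the fixed-range MTRRs
 * cover the first 1 MiB of physical memory with 88 one-byte type entries:
 * one 64K MSR maps 0x00000-0x7FFFF in eight 64 KiB chunks, two 16K MSRs map
 * 0x80000-0xBFFFF in sixteen 16 KiB chunks, and eight 4K MSRs map
 * 0xC0000-0xFFFFF in sixty-four 4 KiB chunks. mtrr_type_lookup() below
 * indexes fixed_ranges[] with exactly this layout, e.g. an address of
 * 0xA5000 falls in the 16K block: idx = 8 + ((0xA5000 - 0x80000) >> 14)
 * = 8 + 9 = 17.
 */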

/*
 * Returns the effective MTRR type for the region.
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start. */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state.enabled & 2))
		return mtrr_state.def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
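
/*
 * Note (explanatory, illustrative only): when two enabled variable MTRRs
 * overlap, the loop above resolves the conflict the way the architecture
 * defines it: uncachable always wins, write-through beats write-back, and
 * any other disagreement is treated as uncachable. For example, a WB range
 * covering 0-256 MiB overlapped by a UC range at 248-256 MiB yields UC for
 * an address in the overlap, while WB overlapped by WT yields WT.
 */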

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug(" %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & 2 ? "en" : "dis");
	if (size_or_mask & 0xffffffffUL)
		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
	else
		high_width = ffs(size_or_mask >> 32) + 32 - 1;
	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug(" %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2 >> 20);
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned long flags;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here. */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;
	int cpu;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	cpu = get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
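
/*
 * Note (explanatory, illustrative only): generic_get_mtrr() recovers the
 * region size from the MTRRphysMask value by relying on the mask being
 * contiguous, in which case the size in pages is simply the two's
 * complement of the page-frame mask. For example, with 4 KiB pages, a
 * page-frame mask whose low 32 bits end up as 0xFFFFF800 gives
 * *size = -0xFFFFF800 = 0x800 pages, i.e. an 8 MiB region.
 */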

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
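
/*
 * Note (explanatory, not from the original source): in MTRRdefType the low
 * byte (bits 7:0) holds the default memory type, bit 10 is the fixed-range
 * enable (FE) and bit 11 is the global MTRR enable (E). That is why
 * mtrr_state.enabled is derived as (lo & 0xc00) >> 10 above, and why
 * prepare_set() below clears 0xcff to disable all MTRRs with a default
 * type of uncached while the registers are being rewritten.
 */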

static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
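
/*
 * Note (explanatory, illustrative only): base and size are passed to
 * generic_set_mtrr() in 4 KiB page units. For an 8 MiB write-back region
 * at physical 0xC0000000 that means base = 0xC0000 and size = 0x800, so
 * the MSR images above become base_lo = 0xC0000000 | MTRR_TYPE_WRBACK and
 * mask_lo = (-0x800 << 12) | 0x800 = 0xFF800800 (the 0x800 bit marks the
 * range as valid).
 */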

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};