/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one   64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two   16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight  4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

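/*
 * Illustrative sketch (not part of the driver): the fixed-range index
 * arithmetic used by mtrr_type_lookup() above, pulled out into a
 * hypothetical helper for clarity. The 88-entry fixed_ranges[] array is
 * laid out as eight 64K entries (00000-7FFFF), sixteen 16K entries
 * (80000-BFFFF) and sixty-four 4K entries (C0000-FFFFF); the helper name
 * is an assumption made for this example, not an existing interface.
 */
static inline int fixed_range_index_sketch(unsigned long addr)
{
	if (addr < 0x80000)			/* 64K granularity, indices 0-7  */
		return addr >> 16;
	if (addr < 0xC0000)			/* 16K granularity, indices 8-23 */
		return 8 + ((addr - 0x80000) >> 14);
	return 24 + ((addr - 0xC0000) >> 12);	/*  4K granularity, indices 24-87 */
}
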
/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	printk(KERN_DEBUG "  %05X-%05X %s\n", last_fixed_start,
		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init print_fixed(unsigned base, unsigned step,
			       const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	printk(KERN_DEBUG "MTRR default type: %s\n",
	       mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
		       mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
	       mtrr_state.enabled & 2 ? "en" : "dis");
	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32)
		      - (32 - PAGE_SHIFT) + 3) / 4;
	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			printk(KERN_DEBUG
			       "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
			       i,
			       high_width,
			       mtrr_state.var_ranges[i].base_hi,
			       mtrr_state.var_ranges[i].base_lo >> 12,
			       high_width,
			       mtrr_state.var_ranges[i].mask_hi,
			       mtrr_state.var_ranges[i].mask_lo >> 12,
			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			printk(KERN_DEBUG "  %u disabled\n", i);
	}
	if (mtrr_tom2) {
		printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
		       mtrr_tom2, mtrr_tom2>>20);
	}
}

/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;
	int cpu;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	cpu = get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

	printk(KERN_DEBUG "  get_mtrr: cpu%d reg%02d base=%010lx size=%010lx %s\n",
	       cpu, reg, *base, *size,
	       mtrr_attrib_to_str(*type & 0xff));
out_put_cpu:
	put_cpu();
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns TRUE if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts; they
 * would run extremely slowly and would only increase the pain. The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal, since the cache is only flushed/disabled
	 * for this CPU while the MTRRs are changed, but changing this requires
	 * more invasive changes to the way the kernel boots.
	 */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*
 * [SUMMARY] Set variable MTRR register on the local CPU.
 * <reg> The register to set.
 * <base> The base address of the region.
 * <size> The size of the region. If this is 0 the region is disabled.
 * <type> The type of the region.
 * [RETURNS] Nothing.
 */
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply clear the
		 * relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7, must be 4 MiB aligned
	 * and not touch 0x70000000->0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}


static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};

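/*
 * Illustrative sketch (not part of the driver): how a variable-range
 * PhysBase/PhysMask pair describes a region. For a power-of-two,
 * size-aligned range the mask is ~(size - 1) clipped to the CPU's
 * physical address width, and a physical byte address hits the range
 * when (addr & mask) == (base & mask) - the same test mtrr_type_lookup()
 * applies and the inverse of the encoding done in generic_set_mtrr().
 * The helper name and the explicit width parameter are assumptions made
 * for this example only.
 */
static inline bool var_range_matches_sketch(u64 addr, u64 base, u64 size,
					    int phys_addr_bits)
{
	/* phys_addr_bits is at most 52 for MTRRs, so the shift is safe */
	u64 mask = ~(size - 1) & ((1ULL << phys_addr_bits) - 1);

	return (addr & mask) == (base & mask);
}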