1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong 4 * because MTRRs can span up to 40 bits (36bits on most modern x86) 5 */ 6 7 #include <linux/export.h> 8 #include <linux/init.h> 9 #include <linux/io.h> 10 #include <linux/mm.h> 11 12 #include <asm/processor-flags.h> 13 #include <asm/cacheinfo.h> 14 #include <asm/cpufeature.h> 15 #include <asm/tlbflush.h> 16 #include <asm/mtrr.h> 17 #include <asm/msr.h> 18 #include <asm/memtype.h> 19 20 #include "mtrr.h" 21 22 struct fixed_range_block { 23 int base_msr; /* start address of an MTRR block */ 24 int ranges; /* number of MTRRs in this block */ 25 }; 26 27 static struct fixed_range_block fixed_range_blocks[] = { 28 { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ 29 { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ 30 { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ 31 {} 32 }; 33 34 static unsigned long smp_changes_mask; 35 static int mtrr_state_set; 36 u64 mtrr_tom2; 37 38 struct mtrr_state_type mtrr_state; 39 EXPORT_SYMBOL_GPL(mtrr_state); 40 41 /* 42 * BIOS is expected to clear MtrrFixDramModEn bit, see for example 43 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD 44 * Opteron Processors" (26094 Rev. 3.30 February 2006), section 45 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set 46 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to 47 * 0 for operation." 48 */ 49 static inline void k8_check_syscfg_dram_mod_en(void) 50 { 51 u32 lo, hi; 52 53 if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && 54 (boot_cpu_data.x86 >= 0x0f))) 55 return; 56 57 rdmsr(MSR_AMD64_SYSCFG, lo, hi); 58 if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) { 59 pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]" 60 " not cleared by BIOS, clearing this bit\n", 61 smp_processor_id()); 62 lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY; 63 mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi); 64 } 65 } 66 67 /* Get the size of contiguous MTRR range */ 68 static u64 get_mtrr_size(u64 mask) 69 { 70 u64 size; 71 72 mask >>= PAGE_SHIFT; 73 mask |= size_or_mask; 74 size = -mask; 75 size <<= PAGE_SHIFT; 76 return size; 77 } 78 79 /* 80 * Check and return the effective type for MTRR-MTRR type overlap. 81 * Returns 1 if the effective type is UNCACHEABLE, else returns 0 82 */ 83 static int check_type_overlap(u8 *prev, u8 *curr) 84 { 85 if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) { 86 *prev = MTRR_TYPE_UNCACHABLE; 87 *curr = MTRR_TYPE_UNCACHABLE; 88 return 1; 89 } 90 91 if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) || 92 (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) { 93 *prev = MTRR_TYPE_WRTHROUGH; 94 *curr = MTRR_TYPE_WRTHROUGH; 95 } 96 97 if (*prev != *curr) { 98 *prev = MTRR_TYPE_UNCACHABLE; 99 *curr = MTRR_TYPE_UNCACHABLE; 100 return 1; 101 } 102 103 return 0; 104 } 105 106 /** 107 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries 108 * 109 * Return the MTRR fixed memory type of 'start'. 110 * 111 * MTRR fixed entries are divided into the following ways: 112 * 0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges 113 * 0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges 114 * 0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges 115 * 116 * Return Values: 117 * MTRR_TYPE_(type) - Matched memory type 118 * MTRR_TYPE_INVALID - Unmatched 119 */ 120 static u8 mtrr_type_lookup_fixed(u64 start, u64 end) 121 { 122 int idx; 123 124 if (start >= 0x100000) 125 return MTRR_TYPE_INVALID; 126 127 /* 0x0 - 0x7FFFF */ 128 if (start < 0x80000) { 129 idx = 0; 130 idx += (start >> 16); 131 return mtrr_state.fixed_ranges[idx]; 132 /* 0x80000 - 0xBFFFF */ 133 } else if (start < 0xC0000) { 134 idx = 1 * 8; 135 idx += ((start - 0x80000) >> 14); 136 return mtrr_state.fixed_ranges[idx]; 137 } 138 139 /* 0xC0000 - 0xFFFFF */ 140 idx = 3 * 8; 141 idx += ((start - 0xC0000) >> 12); 142 return mtrr_state.fixed_ranges[idx]; 143 } 144 145 /** 146 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries 147 * 148 * Return Value: 149 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched) 150 * 151 * Output Arguments: 152 * repeat - Set to 1 when [start:end] spanned across MTRR range and type 153 * returned corresponds only to [start:*partial_end]. Caller has 154 * to lookup again for [*partial_end:end]. 155 * 156 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the 157 * region is fully covered by a single MTRR entry or the default 158 * type. 159 */ 160 static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end, 161 int *repeat, u8 *uniform) 162 { 163 int i; 164 u64 base, mask; 165 u8 prev_match, curr_match; 166 167 *repeat = 0; 168 *uniform = 1; 169 170 prev_match = MTRR_TYPE_INVALID; 171 for (i = 0; i < num_var_ranges; ++i) { 172 unsigned short start_state, end_state, inclusive; 173 174 if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11))) 175 continue; 176 177 base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) + 178 (mtrr_state.var_ranges[i].base_lo & PAGE_MASK); 179 mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) + 180 (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK); 181 182 start_state = ((start & mask) == (base & mask)); 183 end_state = ((end & mask) == (base & mask)); 184 inclusive = ((start < base) && (end > base)); 185 186 if ((start_state != end_state) || inclusive) { 187 /* 188 * We have start:end spanning across an MTRR. 189 * We split the region into either 190 * 191 * - start_state:1 192 * (start:mtrr_end)(mtrr_end:end) 193 * - end_state:1 194 * (start:mtrr_start)(mtrr_start:end) 195 * - inclusive:1 196 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end) 197 * 198 * depending on kind of overlap. 199 * 200 * Return the type of the first region and a pointer 201 * to the start of next region so that caller will be 202 * advised to lookup again after having adjusted start 203 * and end. 204 * 205 * Note: This way we handle overlaps with multiple 206 * entries and the default type properly. 207 */ 208 if (start_state) 209 *partial_end = base + get_mtrr_size(mask); 210 else 211 *partial_end = base; 212 213 if (unlikely(*partial_end <= start)) { 214 WARN_ON(1); 215 *partial_end = start + PAGE_SIZE; 216 } 217 218 end = *partial_end - 1; /* end is inclusive */ 219 *repeat = 1; 220 *uniform = 0; 221 } 222 223 if ((start & mask) != (base & mask)) 224 continue; 225 226 curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; 227 if (prev_match == MTRR_TYPE_INVALID) { 228 prev_match = curr_match; 229 continue; 230 } 231 232 *uniform = 0; 233 if (check_type_overlap(&prev_match, &curr_match)) 234 return curr_match; 235 } 236 237 if (prev_match != MTRR_TYPE_INVALID) 238 return prev_match; 239 240 return mtrr_state.def_type; 241 } 242 243 /** 244 * mtrr_type_lookup - look up memory type in MTRR 245 * 246 * Return Values: 247 * MTRR_TYPE_(type) - The effective MTRR type for the region 248 * MTRR_TYPE_INVALID - MTRR is disabled 249 * 250 * Output Argument: 251 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the 252 * region is fully covered by a single MTRR entry or the default 253 * type. 254 */ 255 u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform) 256 { 257 u8 type, prev_type, is_uniform = 1, dummy; 258 int repeat; 259 u64 partial_end; 260 261 /* Make end inclusive instead of exclusive */ 262 end--; 263 264 if (!mtrr_state_set) 265 return MTRR_TYPE_INVALID; 266 267 if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED)) 268 return MTRR_TYPE_INVALID; 269 270 /* 271 * Look up the fixed ranges first, which take priority over 272 * the variable ranges. 273 */ 274 if ((start < 0x100000) && 275 (mtrr_state.have_fixed) && 276 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) { 277 is_uniform = 0; 278 type = mtrr_type_lookup_fixed(start, end); 279 goto out; 280 } 281 282 /* 283 * Look up the variable ranges. Look of multiple ranges matching 284 * this address and pick type as per MTRR precedence. 285 */ 286 type = mtrr_type_lookup_variable(start, end, &partial_end, 287 &repeat, &is_uniform); 288 289 /* 290 * Common path is with repeat = 0. 291 * However, we can have cases where [start:end] spans across some 292 * MTRR ranges and/or the default type. Do repeated lookups for 293 * that case here. 294 */ 295 while (repeat) { 296 prev_type = type; 297 start = partial_end; 298 is_uniform = 0; 299 type = mtrr_type_lookup_variable(start, end, &partial_end, 300 &repeat, &dummy); 301 302 if (check_type_overlap(&prev_type, &type)) 303 goto out; 304 } 305 306 if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2)) 307 type = MTRR_TYPE_WRBACK; 308 309 out: 310 *uniform = is_uniform; 311 return type; 312 } 313 314 /* Get the MSR pair relating to a var range */ 315 static void 316 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) 317 { 318 rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); 319 rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); 320 } 321 322 /* Fill the MSR pair relating to a var range */ 323 void fill_mtrr_var_range(unsigned int index, 324 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) 325 { 326 struct mtrr_var_range *vr; 327 328 vr = mtrr_state.var_ranges; 329 330 vr[index].base_lo = base_lo; 331 vr[index].base_hi = base_hi; 332 vr[index].mask_lo = mask_lo; 333 vr[index].mask_hi = mask_hi; 334 } 335 336 static void get_fixed_ranges(mtrr_type *frs) 337 { 338 unsigned int *p = (unsigned int *)frs; 339 int i; 340 341 k8_check_syscfg_dram_mod_en(); 342 343 rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]); 344 345 for (i = 0; i < 2; i++) 346 rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]); 347 for (i = 0; i < 8; i++) 348 rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]); 349 } 350 351 void mtrr_save_fixed_ranges(void *info) 352 { 353 if (boot_cpu_has(X86_FEATURE_MTRR)) 354 get_fixed_ranges(mtrr_state.fixed_ranges); 355 } 356 357 static unsigned __initdata last_fixed_start; 358 static unsigned __initdata last_fixed_end; 359 static mtrr_type __initdata last_fixed_type; 360 361 static void __init print_fixed_last(void) 362 { 363 if (!last_fixed_end) 364 return; 365 366 pr_debug(" %05X-%05X %s\n", last_fixed_start, 367 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); 368 369 last_fixed_end = 0; 370 } 371 372 static void __init update_fixed_last(unsigned base, unsigned end, 373 mtrr_type type) 374 { 375 last_fixed_start = base; 376 last_fixed_end = end; 377 last_fixed_type = type; 378 } 379 380 static void __init 381 print_fixed(unsigned base, unsigned step, const mtrr_type *types) 382 { 383 unsigned i; 384 385 for (i = 0; i < 8; ++i, ++types, base += step) { 386 if (last_fixed_end == 0) { 387 update_fixed_last(base, base + step, *types); 388 continue; 389 } 390 if (last_fixed_end == base && last_fixed_type == *types) { 391 last_fixed_end = base + step; 392 continue; 393 } 394 /* new segments: gap or different type */ 395 print_fixed_last(); 396 update_fixed_last(base, base + step, *types); 397 } 398 } 399 400 static void __init print_mtrr_state(void) 401 { 402 unsigned int i; 403 int high_width; 404 405 pr_debug("MTRR default type: %s\n", 406 mtrr_attrib_to_str(mtrr_state.def_type)); 407 if (mtrr_state.have_fixed) { 408 pr_debug("MTRR fixed ranges %sabled:\n", 409 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) && 410 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ? 411 "en" : "dis"); 412 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); 413 for (i = 0; i < 2; ++i) 414 print_fixed(0x80000 + i * 0x20000, 0x04000, 415 mtrr_state.fixed_ranges + (i + 1) * 8); 416 for (i = 0; i < 8; ++i) 417 print_fixed(0xC0000 + i * 0x08000, 0x01000, 418 mtrr_state.fixed_ranges + (i + 3) * 8); 419 420 /* tail */ 421 print_fixed_last(); 422 } 423 pr_debug("MTRR variable ranges %sabled:\n", 424 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis"); 425 high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4; 426 427 for (i = 0; i < num_var_ranges; ++i) { 428 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) 429 pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n", 430 i, 431 high_width, 432 mtrr_state.var_ranges[i].base_hi, 433 mtrr_state.var_ranges[i].base_lo >> 12, 434 high_width, 435 mtrr_state.var_ranges[i].mask_hi, 436 mtrr_state.var_ranges[i].mask_lo >> 12, 437 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); 438 else 439 pr_debug(" %u disabled\n", i); 440 } 441 if (mtrr_tom2) 442 pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20); 443 } 444 445 /* Grab all of the MTRR state for this CPU into *state */ 446 bool __init get_mtrr_state(void) 447 { 448 struct mtrr_var_range *vrs; 449 unsigned lo, dummy; 450 unsigned int i; 451 452 vrs = mtrr_state.var_ranges; 453 454 rdmsr(MSR_MTRRcap, lo, dummy); 455 mtrr_state.have_fixed = (lo >> 8) & 1; 456 457 for (i = 0; i < num_var_ranges; i++) 458 get_mtrr_var_range(i, &vrs[i]); 459 if (mtrr_state.have_fixed) 460 get_fixed_ranges(mtrr_state.fixed_ranges); 461 462 rdmsr(MSR_MTRRdefType, lo, dummy); 463 mtrr_state.def_type = (lo & 0xff); 464 mtrr_state.enabled = (lo & 0xc00) >> 10; 465 466 if (amd_special_default_mtrr()) { 467 unsigned low, high; 468 469 /* TOP_MEM2 */ 470 rdmsr(MSR_K8_TOP_MEM2, low, high); 471 mtrr_tom2 = high; 472 mtrr_tom2 <<= 32; 473 mtrr_tom2 |= low; 474 mtrr_tom2 &= 0xffffff800000ULL; 475 } 476 477 print_mtrr_state(); 478 479 mtrr_state_set = 1; 480 481 return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED); 482 } 483 484 /* Some BIOS's are messed up and don't set all MTRRs the same! */ 485 void __init mtrr_state_warn(void) 486 { 487 unsigned long mask = smp_changes_mask; 488 489 if (!mask) 490 return; 491 if (mask & MTRR_CHANGE_MASK_FIXED) 492 pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n"); 493 if (mask & MTRR_CHANGE_MASK_VARIABLE) 494 pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n"); 495 if (mask & MTRR_CHANGE_MASK_DEFTYPE) 496 pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n"); 497 498 pr_info("mtrr: probably your BIOS does not setup all CPUs.\n"); 499 pr_info("mtrr: corrected configuration.\n"); 500 } 501 502 /* 503 * Doesn't attempt to pass an error out to MTRR users 504 * because it's quite complicated in some cases and probably not 505 * worth it because the best error handling is to ignore it. 506 */ 507 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) 508 { 509 if (wrmsr_safe(msr, a, b) < 0) { 510 pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", 511 smp_processor_id(), msr, a, b); 512 } 513 } 514 515 /** 516 * set_fixed_range - checks & updates a fixed-range MTRR if it 517 * differs from the value it should have 518 * @msr: MSR address of the MTTR which should be checked and updated 519 * @changed: pointer which indicates whether the MTRR needed to be changed 520 * @msrwords: pointer to the MSR values which the MSR should have 521 */ 522 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords) 523 { 524 unsigned lo, hi; 525 526 rdmsr(msr, lo, hi); 527 528 if (lo != msrwords[0] || hi != msrwords[1]) { 529 mtrr_wrmsr(msr, msrwords[0], msrwords[1]); 530 *changed = true; 531 } 532 } 533 534 /** 535 * generic_get_free_region - Get a free MTRR. 536 * @base: The starting (base) address of the region. 537 * @size: The size (in bytes) of the region. 538 * @replace_reg: mtrr index to be replaced; set to invalid value if none. 539 * 540 * Returns: The index of the region on success, else negative on error. 541 */ 542 int 543 generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) 544 { 545 unsigned long lbase, lsize; 546 mtrr_type ltype; 547 int i, max; 548 549 max = num_var_ranges; 550 if (replace_reg >= 0 && replace_reg < max) 551 return replace_reg; 552 553 for (i = 0; i < max; ++i) { 554 mtrr_if->get(i, &lbase, &lsize, <ype); 555 if (lsize == 0) 556 return i; 557 } 558 559 return -ENOSPC; 560 } 561 562 static void generic_get_mtrr(unsigned int reg, unsigned long *base, 563 unsigned long *size, mtrr_type *type) 564 { 565 u32 mask_lo, mask_hi, base_lo, base_hi; 566 unsigned int hi; 567 u64 tmp, mask; 568 569 /* 570 * get_mtrr doesn't need to update mtrr_state, also it could be called 571 * from any cpu, so try to print it out directly. 572 */ 573 get_cpu(); 574 575 rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); 576 577 if ((mask_lo & 0x800) == 0) { 578 /* Invalid (i.e. free) range */ 579 *base = 0; 580 *size = 0; 581 *type = 0; 582 goto out_put_cpu; 583 } 584 585 rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); 586 587 /* Work out the shifted address mask: */ 588 tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; 589 mask = size_or_mask | tmp; 590 591 /* Expand tmp with high bits to all 1s: */ 592 hi = fls64(tmp); 593 if (hi > 0) { 594 tmp |= ~((1ULL<<(hi - 1)) - 1); 595 596 if (tmp != mask) { 597 pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n"); 598 add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 599 mask = tmp; 600 } 601 } 602 603 /* 604 * This works correctly if size is a power of two, i.e. a 605 * contiguous range: 606 */ 607 *size = -mask; 608 *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT; 609 *type = base_lo & 0xff; 610 611 out_put_cpu: 612 put_cpu(); 613 } 614 615 /** 616 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they 617 * differ from the saved set 618 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() 619 */ 620 static int set_fixed_ranges(mtrr_type *frs) 621 { 622 unsigned long long *saved = (unsigned long long *)frs; 623 bool changed = false; 624 int block = -1, range; 625 626 k8_check_syscfg_dram_mod_en(); 627 628 while (fixed_range_blocks[++block].ranges) { 629 for (range = 0; range < fixed_range_blocks[block].ranges; range++) 630 set_fixed_range(fixed_range_blocks[block].base_msr + range, 631 &changed, (unsigned int *)saved++); 632 } 633 634 return changed; 635 } 636 637 /* 638 * Set the MSR pair relating to a var range. 639 * Returns true if changes are made. 640 */ 641 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) 642 { 643 unsigned int lo, hi; 644 bool changed = false; 645 646 rdmsr(MTRRphysBase_MSR(index), lo, hi); 647 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) 648 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != 649 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { 650 651 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); 652 changed = true; 653 } 654 655 rdmsr(MTRRphysMask_MSR(index), lo, hi); 656 657 if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL) 658 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != 659 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { 660 mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); 661 changed = true; 662 } 663 return changed; 664 } 665 666 static u32 deftype_lo, deftype_hi; 667 668 /** 669 * set_mtrr_state - Set the MTRR state for this CPU. 670 * 671 * NOTE: The CPU must already be in a safe state for MTRR changes, including 672 * measures that only a single CPU can be active in set_mtrr_state() in 673 * order to not be subject to races for usage of deftype_lo. This is 674 * accomplished by taking cache_disable_lock. 675 * RETURNS: 0 if no changes made, else a mask indicating what was changed. 676 */ 677 static unsigned long set_mtrr_state(void) 678 { 679 unsigned long change_mask = 0; 680 unsigned int i; 681 682 for (i = 0; i < num_var_ranges; i++) { 683 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) 684 change_mask |= MTRR_CHANGE_MASK_VARIABLE; 685 } 686 687 if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) 688 change_mask |= MTRR_CHANGE_MASK_FIXED; 689 690 /* 691 * Set_mtrr_restore restores the old value of MTRRdefType, 692 * so to set it we fiddle with the saved value: 693 */ 694 if ((deftype_lo & 0xff) != mtrr_state.def_type 695 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { 696 697 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | 698 (mtrr_state.enabled << 10); 699 change_mask |= MTRR_CHANGE_MASK_DEFTYPE; 700 } 701 702 return change_mask; 703 } 704 705 void mtrr_disable(void) 706 { 707 /* Save MTRR state */ 708 rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 709 710 /* Disable MTRRs, and set the default type to uncached */ 711 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); 712 } 713 714 void mtrr_enable(void) 715 { 716 /* Intel (P6) standard MTRRs */ 717 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 718 } 719 720 void mtrr_generic_set_state(void) 721 { 722 unsigned long mask, count; 723 724 /* Actually set the state */ 725 mask = set_mtrr_state(); 726 727 /* Use the atomic bitops to update the global mask */ 728 for (count = 0; count < sizeof(mask) * 8; ++count) { 729 if (mask & 0x01) 730 set_bit(count, &smp_changes_mask); 731 mask >>= 1; 732 } 733 } 734 735 /** 736 * generic_set_mtrr - set variable MTRR register on the local CPU. 737 * 738 * @reg: The register to set. 739 * @base: The base address of the region. 740 * @size: The size of the region. If this is 0 the region is disabled. 741 * @type: The type of the region. 742 * 743 * Returns nothing. 744 */ 745 static void generic_set_mtrr(unsigned int reg, unsigned long base, 746 unsigned long size, mtrr_type type) 747 { 748 unsigned long flags; 749 struct mtrr_var_range *vr; 750 751 vr = &mtrr_state.var_ranges[reg]; 752 753 local_irq_save(flags); 754 cache_disable(); 755 756 if (size == 0) { 757 /* 758 * The invalid bit is kept in the mask, so we simply 759 * clear the relevant mask register to disable a range. 760 */ 761 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); 762 memset(vr, 0, sizeof(struct mtrr_var_range)); 763 } else { 764 vr->base_lo = base << PAGE_SHIFT | type; 765 vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT); 766 vr->mask_lo = -size << PAGE_SHIFT | 0x800; 767 vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT); 768 769 mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi); 770 mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi); 771 } 772 773 cache_enable(); 774 local_irq_restore(flags); 775 } 776 777 int generic_validate_add_page(unsigned long base, unsigned long size, 778 unsigned int type) 779 { 780 unsigned long lbase, last; 781 782 /* 783 * For Intel PPro stepping <= 7 784 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF 785 */ 786 if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && 787 boot_cpu_data.x86_model == 1 && 788 boot_cpu_data.x86_stepping <= 7) { 789 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { 790 pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); 791 return -EINVAL; 792 } 793 if (!(base + size < 0x70000 || base > 0x7003F) && 794 (type == MTRR_TYPE_WRCOMB 795 || type == MTRR_TYPE_WRBACK)) { 796 pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); 797 return -EINVAL; 798 } 799 } 800 801 /* 802 * Check upper bits of base and last are equal and lower bits are 0 803 * for base and 1 for last 804 */ 805 last = base + size - 1; 806 for (lbase = base; !(lbase & 1) && (last & 1); 807 lbase = lbase >> 1, last = last >> 1) 808 ; 809 if (lbase != last) { 810 pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size); 811 return -EINVAL; 812 } 813 return 0; 814 } 815 816 static int generic_have_wrcomb(void) 817 { 818 unsigned long config, dummy; 819 rdmsr(MSR_MTRRcap, config, dummy); 820 return config & (1 << 10); 821 } 822 823 int positive_have_wrcomb(void) 824 { 825 return 1; 826 } 827 828 /* 829 * Generic structure... 830 */ 831 const struct mtrr_ops generic_mtrr_ops = { 832 .get = generic_get_mtrr, 833 .get_free_region = generic_get_free_region, 834 .set = generic_set_mtrr, 835 .validate_add_page = generic_validate_add_page, 836 .have_wrcomb = generic_have_wrcomb, 837 }; 838