// SPDX-License-Identifier: GPL-2.0-only
/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cc_platform.h>
#include <asm/processor-flags.h>
#include <asm/cacheinfo.h>
#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/mshyperv.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/memtype.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
u32 phys_hi_rsvd;

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn] not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
	}
}

/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask |= (u64)phys_hi_rsvd << 32;
	size = -mask;

	return size;
}

/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}

/**
 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
 *
 * Return the MTRR fixed memory type of 'start'.
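 *
 * Example: start = 0xA0000 lands in the 16K-granular block, so the
 * lookup below resolves to fixed_ranges[8 + ((0xA0000 - 0x80000) >> 14)],
 * i.e. fixed_ranges[16].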
 *
 * MTRR fixed entries are divided as follows:
 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
 *
 * Return Values:
 * MTRR_TYPE_(type)  - Matched memory type
 * MTRR_TYPE_INVALID - Unmatched
 */
static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
{
	int idx;

	if (start >= 0x100000)
		return MTRR_TYPE_INVALID;

	/* 0x0 - 0x7FFFF */
	if (start < 0x80000) {
		idx = 0;
		idx += (start >> 16);
		return mtrr_state.fixed_ranges[idx];
	/* 0x80000 - 0xBFFFF */
	} else if (start < 0xC0000) {
		idx = 1 * 8;
		idx += ((start - 0x80000) >> 14);
		return mtrr_state.fixed_ranges[idx];
	}

	/* 0xC0000 - 0xFFFFF */
	idx = 3 * 8;
	idx += ((start - 0xC0000) >> 12);
	return mtrr_state.fixed_ranges[idx];
}

/**
 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
 *
 * Return Value:
 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
 *
 * Output Arguments:
 * repeat - Set to 1 when [start:end] spanned across MTRR range and the type
 *	    returned corresponds only to [start:*partial_end]. The caller has
 *	    to look up again for [*partial_end:end].
 *
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
				    int *repeat, u8 *uniform)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	*uniform = 1;

	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;

		if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 *     (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 *     (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 *     (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle overlaps with multiple
			 * entries and the default type properly.
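			 *
			 * Example: for end_state:1 (this MTRR starts inside
			 * [start:end]), *partial_end is set to mtrr_start,
			 * this pass returns the type of [start:mtrr_start],
			 * and the caller repeats the lookup for
			 * [mtrr_start:end].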
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
			*uniform = 0;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & MTRR_PHYSBASE_TYPE;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		*uniform = 0;
		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}

/**
 * mtrr_overwrite_state - set static MTRR state
 *
 * Used to set MTRR state via different means (e.g. with data obtained from
 * a hypervisor).
 * It is only allowed for special cases when running virtualized. Must be
 * called from the x86_init.hyper.init_platform() hook. It can be called
 * only once. The MTRR state can't be changed afterwards. To ensure that,
 * X86_FEATURE_MTRR is cleared.
 */
void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
			  mtrr_type def_type)
{
	unsigned int i;

	/* Only allowed to be called once before mtrr_bp_init(). */
	if (WARN_ON_ONCE(mtrr_state_set))
		return;

	/* Only allowed when running virtualized. */
	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return;

	/*
	 * Only allowed for special virtualization cases:
	 * - when running as Hyper-V, SEV-SNP guest using vTOM
	 * - when running as Xen PV guest
	 * - when running as SEV-SNP or TDX guest to avoid unnecessary
	 *   VMM communication/Virtualization exceptions (#VC, #VE)
	 */
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
	    !hv_is_isolation_supported() &&
	    !cpu_feature_enabled(X86_FEATURE_XENPV) &&
	    !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
		return;

	/* Disable MTRR in order to disable MTRR modifications. */
	setup_clear_cpu_cap(X86_FEATURE_MTRR);

	if (var) {
		if (num_var > MTRR_MAX_VAR_RANGES) {
			pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
				num_var);
			num_var = MTRR_MAX_VAR_RANGES;
		}
		for (i = 0; i < num_var; i++)
			mtrr_state.var_ranges[i] = var[i];
		num_var_ranges = num_var;
	}

	mtrr_state.def_type = def_type;
	mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;

	mtrr_state_set = 1;
}

/**
 * mtrr_type_lookup - look up memory type in MTRR
 *
 * Return Values:
 * MTRR_TYPE_(type)  - The effective MTRR type for the region
 * MTRR_TYPE_INVALID - MTRR is disabled
 *
 * Output Argument:
 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
 *	     region is fully covered by a single MTRR entry or the default
 *	     type.
 */
u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
{
	u8 type, prev_type, is_uniform = 1, dummy;
	int repeat;
	u64 partial_end;

	/* Make end inclusive instead of exclusive */
	end--;

	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/*
	 * Look up the fixed ranges first, which take priority over
	 * the variable ranges.
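	 * Fixed-range MTRRs only cover the first 1MB (0x0 - 0xFFFFF) of
	 * the physical address space.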
	 */
	if ((start < 0x100000) &&
	    (mtrr_state.have_fixed) &&
	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
		is_uniform = 0;
		type = mtrr_type_lookup_fixed(start, end);
		goto out;
	}

	/*
	 * Look up the variable ranges. Look for multiple ranges matching
	 * this address and pick the type as per MTRR precedence.
	 */
	type = mtrr_type_lookup_variable(start, end, &partial_end,
					 &repeat, &is_uniform);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR ranges and/or the default type. Do repeated lookups for
	 * that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		is_uniform = 0;
		type = mtrr_type_lookup_variable(start, end, &partial_end,
						 &repeat, &dummy);

		if (check_type_overlap(&prev_type, &type))
			goto out;
	}

	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
		type = MTRR_TYPE_WRBACK;

out:
	*uniform = is_uniform;
	return type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
			 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (boot_cpu_has(X86_FEATURE_MTRR))
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug(" %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
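	/*
	 * The fixed-range dump below coalesces adjacent sub-ranges of the
	 * same type via update_fixed_last()/print_fixed_last().
	 */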
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			 ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			  (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V)
			pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
						    MTRR_PHYSBASE_TYPE));
		else
			pr_debug(" %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}

/* Grab all of the MTRR state for this CPU into *state */
bool __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = lo & MTRR_CAP_FIX;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE;
	mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
}

/* Some BIOSes are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	pr_info("mtrr: probably your BIOS does not set up all CPUs.\n");
	pr_info("mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
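 * The write below uses wrmsr_safe(), so a faulting MSR write is only
 * reported via pr_err() instead of crashing the kernel.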
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *	differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if (!(mask_lo & MTRR_PHYSMASK_V)) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK);
	mask = (u64)phys_hi_rsvd << 32 | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		if (tmp != mask) {
			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
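	/*
	 * Example: a mask with all address bits from bit 21 upwards set
	 * (after the expansion above) describes a 2MB range: -mask is
	 * 0x200000, so *size below becomes 0x200 pages.
	 */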
	*size = -mask >> PAGE_SHIFT;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & MTRR_PHYSBASE_TYPE;

out_put_cpu:
	put_cpu();
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *	differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD)
	    || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {

		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD)
	    || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes, including
 *	 measures that only a single CPU can be active in set_mtrr_state() in
 *	 order to not be subject to races for usage of deftype_lo. This is
 *	 accomplished by taking cache_disable_lock.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
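 *	    The mask is built from the MTRR_CHANGE_MASK_* bits (fixed,
 *	    variable and default-type changes).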
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
	    ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {

		deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
			     mtrr_state.def_type |
			     (mtrr_state.enabled << MTRR_STATE_SHIFT);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

void mtrr_disable(void)
{
	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
}

void mtrr_enable(void)
{
	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
}

void mtrr_generic_set_state(void)
{
	unsigned long mask, count;

	/* Actually set the state */
	mask = set_mtrr_state();

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof(mask) * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	cache_disable();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
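		 * With MTRR_PHYSMASK_V cleared, the CPU treats this
		 * variable range as disabled and ignores it.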
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
		vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
		vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	cache_enable();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;

	rdmsr(MSR_MTRRcap, config, dummy);
	return config & MTRR_CAP_WC;
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};