xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 1ca12099040fec8c6bbcd9fabf37f04ac0d08e48)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
4  * because MTRRs can span up to 40 bits (36 bits on most modern x86)
5  */
6 
7 #include <linux/export.h>
8 #include <linux/init.h>
9 #include <linux/io.h>
10 #include <linux/mm.h>
11 #include <linux/cc_platform.h>
12 #include <asm/processor-flags.h>
13 #include <asm/cacheinfo.h>
14 #include <asm/cpufeature.h>
15 #include <asm/hypervisor.h>
16 #include <asm/mshyperv.h>
17 #include <asm/tlbflush.h>
18 #include <asm/mtrr.h>
19 #include <asm/msr.h>
20 #include <asm/memtype.h>
21 
22 #include "mtrr.h"
23 
24 struct fixed_range_block {
25 	int base_msr;		/* start address of an MTRR block */
26 	int ranges;		/* number of MTRRs in this block  */
27 };
28 
29 static struct fixed_range_block fixed_range_blocks[] = {
30 	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
31 	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
32 	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
33 	{}
34 };
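
/*
 * Together these blocks describe the eleven fixed-range MSRs covering the
 * first megabyte: one 64K MSR for 0x00000-0x7FFFF, two 16K MSRs for
 * 0x80000-0xBFFFF and eight 4K MSRs for 0xC0000-0xFFFFF, each MSR holding
 * eight one-byte type fields.
 */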
35 
36 static unsigned long smp_changes_mask;
37 static int mtrr_state_set;
38 u64 mtrr_tom2;
39 
40 struct mtrr_state_type mtrr_state;
41 EXPORT_SYMBOL_GPL(mtrr_state);
42 
43 /* Reserved bits in the high portion of the MTRRphysBaseN MSR. */
44 u32 phys_hi_rsvd;
45 
46 /*
47  * BIOS is expected to clear MtrrFixDramModEn bit, see for example
48  * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
49  * Opteron Processors" (26094 Rev. 3.30 February 2006), section
50  * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
51  * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
52  * 0 for operation."
53  */
54 static inline void k8_check_syscfg_dram_mod_en(void)
55 {
56 	u32 lo, hi;
57 
58 	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
59 	      (boot_cpu_data.x86 >= 0x0f)))
60 		return;
61 
62 	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
63 	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
64 		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
65 		       " not cleared by BIOS, clearing this bit\n",
66 		       smp_processor_id());
67 		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
68 		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
69 	}
70 }
71 
72 /* Get the size of contiguous MTRR range */
73 static u64 get_mtrr_size(u64 mask)
74 {
75 	u64 size;
76 
77 	mask |= (u64)phys_hi_rsvd << 32;
78 	size = -mask;
79 
80 	return size;
81 }
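
/*
 * Worked example for get_mtrr_size() above (a sketch, assuming a 36-bit
 * physical address width so that phys_hi_rsvd == 0xFFFFFFF0): a 2 MiB
 * variable MTRR has PhysMask bits 35:21 set, i.e. mask == 0x0000000FFFE00000.
 * OR-ing in the reserved high bits yields 0xFFFFFFFFFFE00000, and the two's
 * complement -mask is then 0x0000000000200000 == 2 MiB, the size of the range.
 */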
82 
83 static u8 get_effective_type(u8 type1, u8 type2)
84 {
85 	if (type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE)
86 		return MTRR_TYPE_UNCACHABLE;
87 
88 	if ((type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH) ||
89 	    (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK))
90 		return MTRR_TYPE_WRTHROUGH;
91 
92 	if (type1 != type2)
93 		return MTRR_TYPE_UNCACHABLE;
94 
95 	return type1;
96 }
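
/*
 * Examples of the combining rules in get_effective_type() above:
 * UC + anything -> UC, WB + WT -> WT, and any other mismatch
 * (e.g. WB + WC) is treated conservatively as UC.
 */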
97 
98 /*
99  * Check the effective type for MTRR-MTRR type overlap and write it back
100  * through both pointers.  Returns true if it is MTRR_TYPE_UNCACHABLE.
101  */
102 static bool check_type_overlap(u8 *prev, u8 *curr)
103 {
104 	*prev = *curr = get_effective_type(*curr, *prev);
105 
106 	return *prev == MTRR_TYPE_UNCACHABLE;
107 }
108 
109 /**
110  * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
111  *
112  * Return the MTRR fixed memory type of 'start'.
113  *
114  * The MTRR fixed entries are laid out as follows:
115  *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
116  *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
117  *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
118  *
119  * Return Values:
120  * MTRR_TYPE_(type)  - Matched memory type
121  * MTRR_TYPE_INVALID - Unmatched
122  */
123 static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
124 {
125 	int idx;
126 
127 	if (start >= 0x100000)
128 		return MTRR_TYPE_INVALID;
129 
130 	/* 0x0 - 0x7FFFF */
131 	if (start < 0x80000) {
132 		idx = 0;
133 		idx += (start >> 16);
134 		return mtrr_state.fixed_ranges[idx];
135 	/* 0x80000 - 0xBFFFF */
136 	} else if (start < 0xC0000) {
137 		idx = 1 * 8;
138 		idx += ((start - 0x80000) >> 14);
139 		return mtrr_state.fixed_ranges[idx];
140 	}
141 
142 	/* 0xC0000 - 0xFFFFF */
143 	idx = 3 * 8;
144 	idx += ((start - 0xC0000) >> 12);
145 	return mtrr_state.fixed_ranges[idx];
146 }
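
/*
 * Index arithmetic example for mtrr_type_lookup_fixed() above: the legacy
 * VGA frame buffer address 0xA0000 falls in the 16K area, so
 * idx = 8 + ((0xA0000 - 0x80000) >> 14) = 16, i.e. fixed_ranges[16],
 * the first sub-range covered by MSR_MTRRfix16K_A0000.
 */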
147 
148 /**
149  * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
150  *
151  * Return Value:
152  * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
153  *
154  * Output Arguments:
155  * repeat - Set to 1 when [start:end] spans across an MTRR range and the
156  *	    type returned corresponds only to [start:*partial_end].  The
157  *	    caller has to look up again for [*partial_end:end].
158  *
159  * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
160  *	     region is fully covered by a single MTRR entry or the default
161  *	     type.
162  */
163 static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
164 				    int *repeat, u8 *uniform)
165 {
166 	int i;
167 	u64 base, mask;
168 	u8 prev_match, curr_match;
169 
170 	*repeat = 0;
171 	*uniform = 1;
172 
173 	prev_match = MTRR_TYPE_INVALID;
174 	for (i = 0; i < num_var_ranges; ++i) {
175 		unsigned short start_state, end_state, inclusive;
176 
177 		if (!(mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V))
178 			continue;
179 
180 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
181 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
182 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
183 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
184 
185 		start_state = ((start & mask) == (base & mask));
186 		end_state = ((end & mask) == (base & mask));
187 		inclusive = ((start < base) && (end > base));
188 
189 		if ((start_state != end_state) || inclusive) {
190 			/*
191 			 * We have start:end spanning across an MTRR.
192 			 * We split the region into either
193 			 *
194 			 * - start_state:1
195 			 * (start:mtrr_end)(mtrr_end:end)
196 			 * - end_state:1
197 			 * (start:mtrr_start)(mtrr_start:end)
198 			 * - inclusive:1
199 			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
200 			 *
201 			 * depending on kind of overlap.
202 			 *
203 			 * Return the type of the first region and a pointer
204 			 * to the start of next region so that caller will be
205 			 * advised to lookup again after having adjusted start
206 			 * and end.
207 			 *
208 			 * Note: This way we handle overlaps with multiple
209 			 * entries and the default type properly.
210 			 */
211 			if (start_state)
212 				*partial_end = base + get_mtrr_size(mask);
213 			else
214 				*partial_end = base;
215 
216 			if (unlikely(*partial_end <= start)) {
217 				WARN_ON(1);
218 				*partial_end = start + PAGE_SIZE;
219 			}
220 
221 			end = *partial_end - 1; /* end is inclusive */
222 			*repeat = 1;
223 			*uniform = 0;
224 		}
225 
226 		if ((start & mask) != (base & mask))
227 			continue;
228 
229 		curr_match = mtrr_state.var_ranges[i].base_lo & MTRR_PHYSBASE_TYPE;
230 		if (prev_match == MTRR_TYPE_INVALID) {
231 			prev_match = curr_match;
232 			continue;
233 		}
234 
235 		*uniform = 0;
236 		if (check_type_overlap(&prev_match, &curr_match))
237 			return curr_match;
238 	}
239 
240 	if (prev_match != MTRR_TYPE_INVALID)
241 		return prev_match;
242 
243 	return mtrr_state.def_type;
244 }
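
/*
 * Matching example for mtrr_type_lookup_variable() above (assuming a 36-bit
 * physical address width): a 256 MiB WB range at 3 GiB has base == 0xC0000000
 * and mask == 0xFF0000000 (bits 35:28 set).  An address is covered by this
 * MTRR iff (addr & mask) == (base & mask), i.e. iff it lies within
 * [0xC0000000, 0xCFFFFFFF].
 */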
245 
246 /**
247  * mtrr_overwrite_state - set static MTRR state
248  *
249  * Used to set MTRR state via different means (e.g. with data obtained from
250  * a hypervisor).
251  * It is allowed only for special cases when running virtualized and must be
252  * called from the x86_init.hyper.init_platform() hook.  It can be called only once.
253  * The MTRR state can't be changed afterwards.  To ensure that, X86_FEATURE_MTRR
254  * is cleared.
255  */
256 void mtrr_overwrite_state(struct mtrr_var_range *var, unsigned int num_var,
257 			  mtrr_type def_type)
258 {
259 	unsigned int i;
260 
261 	/* Only allowed to be called once before mtrr_bp_init(). */
262 	if (WARN_ON_ONCE(mtrr_state_set))
263 		return;
264 
265 	/* Only allowed when running virtualized. */
266 	if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
267 		return;
268 
269 	/*
270 	 * Only allowed for special virtualization cases:
271 	 * - when running as Hyper-V, SEV-SNP guest using vTOM
272 	 * - when running as Xen PV guest
273 	 * - when running as SEV-SNP or TDX guest to avoid unnecessary
274 	 *   VMM communication/Virtualization exceptions (#VC, #VE)
275 	 */
276 	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP) &&
277 	    !hv_is_isolation_supported() &&
278 	    !cpu_feature_enabled(X86_FEATURE_XENPV) &&
279 	    !cpu_feature_enabled(X86_FEATURE_TDX_GUEST))
280 		return;
281 
282 	/* Disable MTRR in order to disable MTRR modifications. */
283 	setup_clear_cpu_cap(X86_FEATURE_MTRR);
284 
285 	if (var) {
286 		if (num_var > MTRR_MAX_VAR_RANGES) {
287 			pr_warn("Trying to overwrite MTRR state with %u variable entries\n",
288 				num_var);
289 			num_var = MTRR_MAX_VAR_RANGES;
290 		}
291 		for (i = 0; i < num_var; i++)
292 			mtrr_state.var_ranges[i] = var[i];
293 		num_var_ranges = num_var;
294 	}
295 
296 	mtrr_state.def_type = def_type;
297 	mtrr_state.enabled |= MTRR_STATE_MTRR_ENABLED;
298 
299 	mtrr_state_set = 1;
300 }
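
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * paravirtualized guest's x86_init.hyper.init_platform() hook could call
 *
 *	mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
 *
 * to report a uniform write-back layout without touching any MTRR MSRs.
 */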
301 
302 /**
303  * mtrr_type_lookup - look up memory type in MTRR
304  *
305  * Return Values:
306  * MTRR_TYPE_(type)  - The effective MTRR type for the region
307  * MTRR_TYPE_INVALID - MTRR is disabled
308  *
309  * Output Argument:
310  * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
311  *	     region is fully covered by a single MTRR entry or the default
312  *	     type.
313  */
314 u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
315 {
316 	u8 type, prev_type, is_uniform = 1, dummy;
317 	int repeat;
318 	u64 partial_end;
319 
320 	/* Make end inclusive instead of exclusive */
321 	end--;
322 
323 	if (!mtrr_state_set)
324 		return MTRR_TYPE_INVALID;
325 
326 	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
327 		return MTRR_TYPE_INVALID;
328 
329 	/*
330 	 * Look up the fixed ranges first, which take priority over
331 	 * the variable ranges.
332 	 */
333 	if ((start < 0x100000) &&
334 	    (mtrr_state.have_fixed) &&
335 	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
336 		is_uniform = 0;
337 		type = mtrr_type_lookup_fixed(start, end);
338 		goto out;
339 	}
340 
341 	/*
342 	 * Look up the variable ranges.  Look for multiple ranges matching
343 	 * this address and pick the type as per MTRR precedence.
344 	 */
345 	type = mtrr_type_lookup_variable(start, end, &partial_end,
346 					 &repeat, &is_uniform);
347 
348 	/*
349 	 * Common path is with repeat = 0.
350 	 * However, we can have cases where [start:end] spans across some
351 	 * MTRR ranges and/or the default type.  Do repeated lookups for
352 	 * that case here.
353 	 */
354 	while (repeat) {
355 		prev_type = type;
356 		start = partial_end;
357 		is_uniform = 0;
358 		type = mtrr_type_lookup_variable(start, end, &partial_end,
359 						 &repeat, &dummy);
360 
361 		if (check_type_overlap(&prev_type, &type))
362 			goto out;
363 	}
364 
365 	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
366 		type = MTRR_TYPE_WRBACK;
367 
368 out:
369 	*uniform = is_uniform;
370 	return type;
371 }
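
/*
 * Example of the repeat handling in mtrr_type_lookup() above: a lookup of
 * [0xBFF00000, 0xC0100000) against an MTRR that ends at 0xC0000000 first
 * returns the type of [0xBFF00000, 0xC0000000) with repeat set; the loop
 * then looks up [0xC0000000, 0xC0100000) and combines the two types via
 * check_type_overlap().
 */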
372 
373 /* Get the MSR pair relating to a var range */
374 static void
375 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
376 {
377 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
378 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
379 }
380 
381 /* Fill the MSR pair relating to a var range */
382 void fill_mtrr_var_range(unsigned int index,
383 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
384 {
385 	struct mtrr_var_range *vr;
386 
387 	vr = mtrr_state.var_ranges;
388 
389 	vr[index].base_lo = base_lo;
390 	vr[index].base_hi = base_hi;
391 	vr[index].mask_lo = mask_lo;
392 	vr[index].mask_hi = mask_hi;
393 }
394 
395 static void get_fixed_ranges(mtrr_type *frs)
396 {
397 	unsigned int *p = (unsigned int *)frs;
398 	int i;
399 
400 	k8_check_syscfg_dram_mod_en();
401 
402 	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
403 
404 	for (i = 0; i < 2; i++)
405 		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
406 	for (i = 0; i < 8; i++)
407 		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
408 }
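
/*
 * Layout note for get_fixed_ranges() above: frs points at the 88-byte
 * fixed_ranges[] array, viewed here as 22 32-bit words.  Each rdmsr() fills
 * two consecutive words, so the single 64K MSR lands in p[0..1], the two
 * 16K MSRs in p[2..5] and the eight 4K MSRs in p[6..21].
 */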
409 
410 void mtrr_save_fixed_ranges(void *info)
411 {
412 	if (boot_cpu_has(X86_FEATURE_MTRR))
413 		get_fixed_ranges(mtrr_state.fixed_ranges);
414 }
415 
416 static unsigned __initdata last_fixed_start;
417 static unsigned __initdata last_fixed_end;
418 static mtrr_type __initdata last_fixed_type;
419 
420 static void __init print_fixed_last(void)
421 {
422 	if (!last_fixed_end)
423 		return;
424 
425 	pr_debug("  %05X-%05X %s\n", last_fixed_start,
426 		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
427 
428 	last_fixed_end = 0;
429 }
430 
431 static void __init update_fixed_last(unsigned base, unsigned end,
432 				     mtrr_type type)
433 {
434 	last_fixed_start = base;
435 	last_fixed_end = end;
436 	last_fixed_type = type;
437 }
438 
439 static void __init
440 print_fixed(unsigned base, unsigned step, const mtrr_type *types)
441 {
442 	unsigned i;
443 
444 	for (i = 0; i < 8; ++i, ++types, base += step) {
445 		if (last_fixed_end == 0) {
446 			update_fixed_last(base, base + step, *types);
447 			continue;
448 		}
449 		if (last_fixed_end == base && last_fixed_type == *types) {
450 			last_fixed_end = base + step;
451 			continue;
452 		}
453 		/* new segments: gap or different type */
454 		print_fixed_last();
455 		update_fixed_last(base, base + step, *types);
456 	}
457 }
458 
459 static void __init print_mtrr_state(void)
460 {
461 	unsigned int i;
462 	int high_width;
463 
464 	pr_debug("MTRR default type: %s\n",
465 		 mtrr_attrib_to_str(mtrr_state.def_type));
466 	if (mtrr_state.have_fixed) {
467 		pr_debug("MTRR fixed ranges %sabled:\n",
468 			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
469 			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
470 			 "en" : "dis");
471 		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
472 		for (i = 0; i < 2; ++i)
473 			print_fixed(0x80000 + i * 0x20000, 0x04000,
474 				    mtrr_state.fixed_ranges + (i + 1) * 8);
475 		for (i = 0; i < 8; ++i)
476 			print_fixed(0xC0000 + i * 0x08000, 0x01000,
477 				    mtrr_state.fixed_ranges + (i + 3) * 8);
478 
479 		/* tail */
480 		print_fixed_last();
481 	}
482 	pr_debug("MTRR variable ranges %sabled:\n",
483 		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
484 	high_width = (boot_cpu_data.x86_phys_bits - (32 - PAGE_SHIFT) + 3) / 4;
485 
486 	for (i = 0; i < num_var_ranges; ++i) {
487 		if (mtrr_state.var_ranges[i].mask_lo & MTRR_PHYSMASK_V)
488 			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
489 				 i,
490 				 high_width,
491 				 mtrr_state.var_ranges[i].base_hi,
492 				 mtrr_state.var_ranges[i].base_lo >> 12,
493 				 high_width,
494 				 mtrr_state.var_ranges[i].mask_hi,
495 				 mtrr_state.var_ranges[i].mask_lo >> 12,
496 				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo &
497 						    MTRR_PHYSBASE_TYPE));
498 		else
499 			pr_debug("  %u disabled\n", i);
500 	}
501 	if (mtrr_tom2)
502 		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
503 }
504 
505 /* Grab all of the MTRR state for this CPU into *state */
506 bool __init get_mtrr_state(void)
507 {
508 	struct mtrr_var_range *vrs;
509 	unsigned lo, dummy;
510 	unsigned int i;
511 
512 	vrs = mtrr_state.var_ranges;
513 
514 	rdmsr(MSR_MTRRcap, lo, dummy);
515 	mtrr_state.have_fixed = lo & MTRR_CAP_FIX;
516 
517 	for (i = 0; i < num_var_ranges; i++)
518 		get_mtrr_var_range(i, &vrs[i]);
519 	if (mtrr_state.have_fixed)
520 		get_fixed_ranges(mtrr_state.fixed_ranges);
521 
522 	rdmsr(MSR_MTRRdefType, lo, dummy);
523 	mtrr_state.def_type = lo & MTRR_DEF_TYPE_TYPE;
524 	mtrr_state.enabled = (lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT;
525 
526 	if (amd_special_default_mtrr()) {
527 		unsigned low, high;
528 
529 		/* TOP_MEM2 */
530 		rdmsr(MSR_K8_TOP_MEM2, low, high);
531 		mtrr_tom2 = high;
532 		mtrr_tom2 <<= 32;
533 		mtrr_tom2 |= low;
534 		mtrr_tom2 &= 0xffffff800000ULL;
535 	}
536 
537 	print_mtrr_state();
538 
539 	mtrr_state_set = 1;
540 
541 	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
542 }
543 
544 /* Some BIOSes are messed up and don't set all MTRRs the same! */
545 void __init mtrr_state_warn(void)
546 {
547 	unsigned long mask = smp_changes_mask;
548 
549 	if (!mask)
550 		return;
551 	if (mask & MTRR_CHANGE_MASK_FIXED)
552 		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
553 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
554 		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
555 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
556 		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
557 
558 	pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
559 	pr_info("mtrr: corrected configuration.\n");
560 }
561 
562 /*
563  * Doesn't attempt to pass an error out to MTRR users because it's quite
564  * complicated in some cases and probably not worth it; the best error
565  * handling is simply to ignore the failure.
566  */
567 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
568 {
569 	if (wrmsr_safe(msr, a, b) < 0) {
570 		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
571 			smp_processor_id(), msr, a, b);
572 	}
573 }
574 
575 /**
576  * set_fixed_range - checks & updates a fixed-range MTRR if it
577  *		     differs from the value it should have
578  * @msr: MSR address of the MTRR which should be checked and updated
579  * @changed: pointer which indicates whether the MTRR needed to be changed
580  * @msrwords: pointer to the MSR values which the MSR should have
581  */
582 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
583 {
584 	unsigned lo, hi;
585 
586 	rdmsr(msr, lo, hi);
587 
588 	if (lo != msrwords[0] || hi != msrwords[1]) {
589 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
590 		*changed = true;
591 	}
592 }
593 
594 /**
595  * generic_get_free_region - Get a free MTRR.
596  * @base: The starting (base) address of the region.
597  * @size: The size of the region (in pages).
598  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
599  *
600  * Returns: The index of the region on success, else negative on error.
601  */
602 int
603 generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
604 {
605 	unsigned long lbase, lsize;
606 	mtrr_type ltype;
607 	int i, max;
608 
609 	max = num_var_ranges;
610 	if (replace_reg >= 0 && replace_reg < max)
611 		return replace_reg;
612 
613 	for (i = 0; i < max; ++i) {
614 		mtrr_if->get(i, &lbase, &lsize, &ltype);
615 		if (lsize == 0)
616 			return i;
617 	}
618 
619 	return -ENOSPC;
620 }
621 
622 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
623 			     unsigned long *size, mtrr_type *type)
624 {
625 	u32 mask_lo, mask_hi, base_lo, base_hi;
626 	unsigned int hi;
627 	u64 tmp, mask;
628 
629 	/*
630 	 * get_mtrr doesn't need to update mtrr_state and may be called from any
631 	 * CPU; get_cpu() just keeps the task from migrating while the MSRs are read.
632 	 */
633 	get_cpu();
634 
635 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
636 
637 	if (!(mask_lo & MTRR_PHYSMASK_V)) {
638 		/*  Invalid (i.e. free) range */
639 		*base = 0;
640 		*size = 0;
641 		*type = 0;
642 		goto out_put_cpu;
643 	}
644 
645 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
646 
647 	/* Work out the shifted address mask: */
648 	tmp = (u64)mask_hi << 32 | (mask_lo & PAGE_MASK);
649 	mask = (u64)phys_hi_rsvd << 32 | tmp;
650 
651 	/* Expand tmp with high bits to all 1s: */
652 	hi = fls64(tmp);
653 	if (hi > 0) {
654 		tmp |= ~((1ULL<<(hi - 1)) - 1);
655 
656 		if (tmp != mask) {
657 			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
658 			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
659 			mask = tmp;
660 		}
661 	}
662 
663 	/*
664 	 * This works correctly if size is a power of two, i.e. a
665 	 * contiguous range:
666 	 */
667 	*size = -mask >> PAGE_SHIFT;
668 	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
669 	*type = base_lo & MTRR_PHYSBASE_TYPE;
670 
671 out_put_cpu:
672 	put_cpu();
673 }
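
/*
 * Worked example for generic_get_mtrr() above (assuming a 36-bit physical
 * address width, phys_hi_rsvd == 0xFFFFFFF0): a well-formed 256 MiB range
 * gives tmp == 0xFF0000000 and mask == 0xFFFFFFFFF0000000, so
 * *size == -mask >> PAGE_SHIFT == 0x10000 pages.  If the BIOS programmed
 * only 0xF0000000 into the mask (missing bits 35:32), the fls64() expansion
 * of tmp no longer matches mask, the firmware-workaround taint is set and
 * the repaired mask is used instead.
 */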
674 
675 /**
676  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
677  *		      differ from the saved set
678  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
679  */
680 static int set_fixed_ranges(mtrr_type *frs)
681 {
682 	unsigned long long *saved = (unsigned long long *)frs;
683 	bool changed = false;
684 	int block = -1, range;
685 
686 	k8_check_syscfg_dram_mod_en();
687 
688 	while (fixed_range_blocks[++block].ranges) {
689 		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
690 			set_fixed_range(fixed_range_blocks[block].base_msr + range,
691 					&changed, (unsigned int *)saved++);
692 	}
693 
694 	return changed;
695 }
696 
697 /*
698  * Set the MSR pair relating to a var range.
699  * Returns true if changes are made.
700  */
701 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
702 {
703 	unsigned int lo, hi;
704 	bool changed = false;
705 
706 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
707 	if ((vr->base_lo & ~MTRR_PHYSBASE_RSVD) != (lo & ~MTRR_PHYSBASE_RSVD)
708 	    || (vr->base_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
709 
710 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
711 		changed = true;
712 	}
713 
714 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
715 
716 	if ((vr->mask_lo & ~MTRR_PHYSMASK_RSVD) != (lo & ~MTRR_PHYSMASK_RSVD)
717 	    || (vr->mask_hi & ~phys_hi_rsvd) != (hi & ~phys_hi_rsvd)) {
718 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
719 		changed = true;
720 	}
721 	return changed;
722 }
723 
724 static u32 deftype_lo, deftype_hi;
725 
726 /**
727  * set_mtrr_state - Set the MTRR state for this CPU.
728  *
729  * NOTE: The CPU must already be in a safe state for MTRR changes, which
730  *       includes ensuring that only a single CPU can be executing
731  *       set_mtrr_state() at a time, so that there are no races on the use
732  *       of deftype_lo.  This is accomplished by taking cache_disable_lock.
733  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
734  */
735 static unsigned long set_mtrr_state(void)
736 {
737 	unsigned long change_mask = 0;
738 	unsigned int i;
739 
740 	for (i = 0; i < num_var_ranges; i++) {
741 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
742 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
743 	}
744 
745 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
746 		change_mask |= MTRR_CHANGE_MASK_FIXED;
747 
748 	/*
749 	 * The MTRR restore path writes back the saved value of MTRRdefType,
750 	 * so to set it we fiddle with the saved value:
751 	 */
752 	if ((deftype_lo & MTRR_DEF_TYPE_TYPE) != mtrr_state.def_type ||
753 	    ((deftype_lo & MTRR_DEF_TYPE_ENABLE) >> MTRR_STATE_SHIFT) != mtrr_state.enabled) {
754 
755 		deftype_lo = (deftype_lo & MTRR_DEF_TYPE_DISABLE) |
756 			     mtrr_state.def_type |
757 			     (mtrr_state.enabled << MTRR_STATE_SHIFT);
758 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
759 	}
760 
761 	return change_mask;
762 }
763 
764 void mtrr_disable(void)
765 {
766 	/* Save MTRR state */
767 	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
768 
769 	/* Disable MTRRs, and set the default type to uncached */
770 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & MTRR_DEF_TYPE_DISABLE, deftype_hi);
771 }
772 
773 void mtrr_enable(void)
774 {
775 	/* Intel (P6) standard MTRRs */
776 	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
777 }
778 
779 void mtrr_generic_set_state(void)
780 {
781 	unsigned long mask, count;
782 
783 	/* Actually set the state */
784 	mask = set_mtrr_state();
785 
786 	/* Use the atomic bitops to update the global mask */
787 	for (count = 0; count < sizeof(mask) * 8; ++count) {
788 		if (mask & 0x01)
789 			set_bit(count, &smp_changes_mask);
790 		mask >>= 1;
791 	}
792 }
793 
794 /**
795  * generic_set_mtrr - set variable MTRR register on the local CPU.
796  *
797  * @reg: The register to set.
798  * @base: The base address of the region.
799  * @size: The size of the region. If this is 0 the region is disabled.
800  * @type: The type of the region.
801  *
802  * Returns nothing.
803  */
804 static void generic_set_mtrr(unsigned int reg, unsigned long base,
805 			     unsigned long size, mtrr_type type)
806 {
807 	unsigned long flags;
808 	struct mtrr_var_range *vr;
809 
810 	vr = &mtrr_state.var_ranges[reg];
811 
812 	local_irq_save(flags);
813 	cache_disable();
814 
815 	if (size == 0) {
816 		/*
817 		 * The invalid bit is kept in the mask, so we simply
818 		 * clear the relevant mask register to disable a range.
819 		 */
820 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
821 		memset(vr, 0, sizeof(struct mtrr_var_range));
822 	} else {
823 		vr->base_lo = base << PAGE_SHIFT | type;
824 		vr->base_hi = (base >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
825 		vr->mask_lo = -size << PAGE_SHIFT | MTRR_PHYSMASK_V;
826 		vr->mask_hi = (-size >> (32 - PAGE_SHIFT)) & ~phys_hi_rsvd;
827 
828 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
829 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
830 	}
831 
832 	cache_enable();
833 	local_irq_restore(flags);
834 }
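
/*
 * Encoding example for generic_set_mtrr() above (assuming a 36-bit physical
 * address width): a 256 MiB WB region at 3 GiB is passed in as
 * base == 0xC0000 pages and size == 0x10000 pages, giving
 * base_lo == 0xC0000000 | MTRR_TYPE_WRBACK, base_hi == 0,
 * mask_lo == 0xF0000000 | MTRR_PHYSMASK_V and mask_hi == 0xF
 * (after the reserved bits are masked off).
 */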
835 
836 int generic_validate_add_page(unsigned long base, unsigned long size,
837 			      unsigned int type)
838 {
839 	unsigned long lbase, last;
840 
841 	/*
842 	 * For Intel PPro stepping <= 7, the region must be 4 MiB aligned and
843 	 * must not touch 0x70000000 -> 0x7003FFFF.
844 	 */
845 	if (mtrr_if == &generic_mtrr_ops && boot_cpu_data.x86 == 6 &&
846 	    boot_cpu_data.x86_model == 1 &&
847 	    boot_cpu_data.x86_stepping <= 7) {
848 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
849 			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
850 			return -EINVAL;
851 		}
852 		if (!(base + size < 0x70000 || base > 0x7003F) &&
853 		    (type == MTRR_TYPE_WRCOMB
854 		     || type == MTRR_TYPE_WRBACK)) {
855 			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
856 			return -EINVAL;
857 		}
858 	}
859 
860 	/*
861 	 * Check that the upper bits of base and last are equal and that the
862 	 * lower bits are 0 for base and 1 for last.
863 	 */
864 	last = base + size - 1;
865 	for (lbase = base; !(lbase & 1) && (last & 1);
866 	     lbase = lbase >> 1, last = last >> 1)
867 		;
868 	if (lbase != last) {
869 		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
870 		return -EINVAL;
871 	}
872 	return 0;
873 }
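
/*
 * Alignment check example for generic_validate_add_page() above:
 * base == 0x40000 pages with size == 0x10000 pages gives last == 0x4FFFF;
 * shifting both right while base ends in 0 and last ends in 1 stops at
 * lbase == last == 0x4, so the request is accepted.  With size == 0x18000
 * (not a power of two) the loop stops at lbase == 0x8, last == 0xA and the
 * request is rejected.
 */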
874 
875 static int generic_have_wrcomb(void)
876 {
877 	unsigned long config, dummy;
878 	rdmsr(MSR_MTRRcap, config, dummy);
879 	return config & MTRR_CAP_WC;
880 }
881 
882 int positive_have_wrcomb(void)
883 {
884 	return 1;
885 }
886 
887 /*
888  * Generic structure...
889  */
890 const struct mtrr_ops generic_mtrr_ops = {
891 	.get			= generic_get_mtrr,
892 	.get_free_region	= generic_get_free_region,
893 	.set			= generic_set_mtrr,
894 	.validate_add_page	= generic_validate_add_page,
895 	.have_wrcomb		= generic_have_wrcomb,
896 };
897