xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 82ced6fd)
1 /* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
2    because MTRRs can span up to 40 bits (36 bits on most modern x86). */
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <asm/io.h>
8 #include <asm/mtrr.h>
9 #include <asm/msr.h>
10 #include <asm/system.h>
11 #include <asm/cpufeature.h>
12 #include <asm/processor-flags.h>
13 #include <asm/tlbflush.h>
14 #include <asm/pat.h>
15 #include "mtrr.h"
16 
17 struct fixed_range_block {
18 	int base_msr; /* start address of an MTRR block */
19 	int ranges;   /* number of MTRRs in this block  */
20 };
21 
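/*
 * The 11 fixed-range MTRR MSRs together describe the first 1 MiB of the
 * address space, eight type bytes per MSR: one MSR covers 0x00000-0x7FFFF
 * in 64 KiB pieces, two MSRs cover 0x80000-0xBFFFF in 16 KiB pieces, and
 * eight MSRs cover 0xC0000-0xFFFFF in 4 KiB pieces.
 */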
22 static struct fixed_range_block fixed_range_blocks[] = {
23 	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
24 	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
25 	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
26 	{}
27 };
28 
29 static unsigned long smp_changes_mask;
30 static int mtrr_state_set;
31 u64 mtrr_tom2;
32 
33 struct mtrr_state_type mtrr_state = {};
34 EXPORT_SYMBOL_GPL(mtrr_state);
35 
36 /**
37  * The BIOS is expected to clear the MtrrFixDramModEn bit; see, for example,
38  * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
39  * Opteron Processors" (26094 Rev. 3.30 February 2006), section
40  * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
41  * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
42  * 0 for operation."
43  */
44 static inline void k8_check_syscfg_dram_mod_en(void)
45 {
46 	u32 lo, hi;
47 
48 	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
49 	      (boot_cpu_data.x86 >= 0x0f)))
50 		return;
51 
52 	rdmsr(MSR_K8_SYSCFG, lo, hi);
53 	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
54 		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
55 		       " not cleared by BIOS, clearing this bit\n",
56 		       smp_processor_id());
57 		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
58 		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
59 	}
60 }
61 
62 /*
63  * Returns the effective MTRR type for the region
64  * Error returns:
65  * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
66  * - 0xFF - when MTRR is not enabled
67  */
68 u8 mtrr_type_lookup(u64 start, u64 end)
69 {
70 	int i;
71 	u64 base, mask;
72 	u8 prev_match, curr_match;
73 
74 	if (!mtrr_state_set)
75 		return 0xFF;
76 
77 	if (!mtrr_state.enabled)
78 		return 0xFF;
79 
80 	/* Make end inclusive instead of exclusive */
81 	end--;
82 
83 	/* Look in fixed ranges. Just return the type as per start */
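	/*
	 * fixed_ranges[] holds one type byte per segment, eight per MSR:
	 * indices 0-7 cover 0x00000-0x7FFFF in 64 KiB steps, 8-23 cover
	 * 0x80000-0xBFFFF in 16 KiB steps, and 24-87 cover 0xC0000-0xFFFFF
	 * in 4 KiB steps, which is what the index arithmetic below computes.
	 */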
84 	if (mtrr_state.have_fixed && (start < 0x100000)) {
85 		int idx;
86 
87 		if (start < 0x80000) {
88 			idx = 0;
89 			idx += (start >> 16);
90 			return mtrr_state.fixed_ranges[idx];
91 		} else if (start < 0xC0000) {
92 			idx = 1 * 8;
93 			idx += ((start - 0x80000) >> 14);
94 			return mtrr_state.fixed_ranges[idx];
95 		} else if (start < 0x1000000) {
96 			idx = 3 * 8;
97 			idx += ((start - 0xC0000) >> 12);
98 			return mtrr_state.fixed_ranges[idx];
99 		}
100 	}
101 
102 	/*
103 	 * Look in variable ranges.
104 	 * Look for multiple ranges that match this address and pick the type
105 	 * according to MTRR precedence.
106 	 */
107 	if (!(mtrr_state.enabled & 2)) {
108 		return mtrr_state.def_type;
109 	}
110 
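	/*
	 * Precedence for overlapping variable ranges: if any matching range
	 * is uncacheable, the result is uncacheable; write-back combined
	 * with write-through yields write-through; any other mix of types
	 * is treated as uncacheable here.
	 */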
111 	prev_match = 0xFF;
112 	for (i = 0; i < num_var_ranges; ++i) {
113 		unsigned short start_state, end_state;
114 
115 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
116 			continue;
117 
118 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
119 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
120 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
121 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
122 
123 		start_state = ((start & mask) == (base & mask));
124 		end_state = ((end & mask) == (base & mask));
125 		if (start_state != end_state)
126 			return 0xFE;
127 
128 		if ((start & mask) != (base & mask)) {
129 			continue;
130 		}
131 
132 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
133 		if (prev_match == 0xFF) {
134 			prev_match = curr_match;
135 			continue;
136 		}
137 
138 		if (prev_match == MTRR_TYPE_UNCACHABLE ||
139 		    curr_match == MTRR_TYPE_UNCACHABLE) {
140 			return MTRR_TYPE_UNCACHABLE;
141 		}
142 
143 		if ((prev_match == MTRR_TYPE_WRBACK &&
144 		     curr_match == MTRR_TYPE_WRTHROUGH) ||
145 		    (prev_match == MTRR_TYPE_WRTHROUGH &&
146 		     curr_match == MTRR_TYPE_WRBACK)) {
147 			prev_match = MTRR_TYPE_WRTHROUGH;
148 			curr_match = MTRR_TYPE_WRTHROUGH;
149 		}
150 
151 		if (prev_match != curr_match) {
152 			return MTRR_TYPE_UNCACHABLE;
153 		}
154 	}
155 
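	/*
	 * On AMD CPUs with TOP_MEM2 enabled, DRAM between 4 GiB and TOM2 is
	 * write-back, independent of the variable ranges.
	 */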
156 	if (mtrr_tom2) {
157 		if (start >= (1ULL<<32) && (end < mtrr_tom2))
158 			return MTRR_TYPE_WRBACK;
159 	}
160 
161 	if (prev_match != 0xFF)
162 		return prev_match;
163 
164 	return mtrr_state.def_type;
165 }
166 
167 /*  Get the MSR pair relating to a var range  */
168 static void
169 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
170 {
171 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
172 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
173 }
174 
175 /*  Fill the in-memory copy of the MSR pair relating to a var range  */
176 void fill_mtrr_var_range(unsigned int index,
177 		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
178 {
179 	struct mtrr_var_range *vr;
180 
181 	vr = mtrr_state.var_ranges;
182 
183 	vr[index].base_lo = base_lo;
184 	vr[index].base_hi = base_hi;
185 	vr[index].mask_lo = mask_lo;
186 	vr[index].mask_hi = mask_hi;
187 }
188 
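/*
 * Read the 11 fixed-range MTRR MSRs into the 88-byte fixed_ranges array,
 * two 32-bit words per MSR.
 */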
189 static void
190 get_fixed_ranges(mtrr_type * frs)
191 {
192 	unsigned int *p = (unsigned int *) frs;
193 	int i;
194 
195 	k8_check_syscfg_dram_mod_en();
196 
197 	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
198 
199 	for (i = 0; i < 2; i++)
200 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
201 	for (i = 0; i < 8; i++)
202 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
203 }
204 
205 void mtrr_save_fixed_ranges(void *info)
206 {
207 	if (cpu_has_mtrr)
208 		get_fixed_ranges(mtrr_state.fixed_ranges);
209 }
210 
211 static unsigned __initdata last_fixed_start;
212 static unsigned __initdata last_fixed_end;
213 static mtrr_type __initdata last_fixed_type;
214 
215 static void __init print_fixed_last(void)
216 {
217 	if (!last_fixed_end)
218 		return;
219 
220 	printk(KERN_DEBUG "  %05X-%05X %s\n", last_fixed_start,
221 		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
222 
223 	last_fixed_end = 0;
224 }
225 
226 static void __init update_fixed_last(unsigned base, unsigned end,
227 				       mtrr_type type)
228 {
229 	last_fixed_start = base;
230 	last_fixed_end = end;
231 	last_fixed_type = type;
232 }
233 
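/*
 * Print one block of eight fixed-range entries, merging adjacent entries
 * of the same type; print_fixed_last() flushes the pending merged range.
 */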
234 static void __init print_fixed(unsigned base, unsigned step,
235 			       const mtrr_type *types)
236 {
237 	unsigned i;
238 
239 	for (i = 0; i < 8; ++i, ++types, base += step) {
240 		if (last_fixed_end == 0) {
241 			update_fixed_last(base, base + step, *types);
242 			continue;
243 		}
244 		if (last_fixed_end == base && last_fixed_type == *types) {
245 			last_fixed_end = base + step;
246 			continue;
247 		}
248 		/* new segments: gap or different type */
249 		print_fixed_last();
250 		update_fixed_last(base, base + step, *types);
251 	}
252 }
253 
254 static void prepare_set(void);
255 static void post_set(void);
256 
257 static void __init print_mtrr_state(void)
258 {
259 	unsigned int i;
260 	int high_width;
261 
262 	printk(KERN_DEBUG "MTRR default type: %s\n",
263 			 mtrr_attrib_to_str(mtrr_state.def_type));
264 	if (mtrr_state.have_fixed) {
265 		printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
266 		       mtrr_state.enabled & 1 ? "en" : "dis");
267 		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
268 		for (i = 0; i < 2; ++i)
269 			print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
270 		for (i = 0; i < 8; ++i)
271 			print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
272 
273 		/* tail */
274 		print_fixed_last();
275 	}
276 	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
277 	       mtrr_state.enabled & 2 ? "en" : "dis");
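	/*
	 * size_or_mask is kept in page-frame units with bits set for all
	 * address bits above the implemented physical address width, so its
	 * lowest set bit encodes that width; high_width ends up as the
	 * number of hex digits needed for the address bits above bit 31
	 * (base_hi/mask_hi).
	 */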
278 	if (size_or_mask & 0xffffffffUL)
279 		high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
280 	else
281 		high_width = ffs(size_or_mask>>32) + 32 - 1;
282 	high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
283 	for (i = 0; i < num_var_ranges; ++i) {
284 		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
285 			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
286 			       i,
287 			       high_width,
288 			       mtrr_state.var_ranges[i].base_hi,
289 			       mtrr_state.var_ranges[i].base_lo >> 12,
290 			       high_width,
291 			       mtrr_state.var_ranges[i].mask_hi,
292 			       mtrr_state.var_ranges[i].mask_lo >> 12,
293 			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
294 		else
295 			printk(KERN_DEBUG "  %u disabled\n", i);
296 	}
297 	if (mtrr_tom2) {
298 		printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
299 				  mtrr_tom2, mtrr_tom2>>20);
300 	}
301 }
302 
303 /*  Grab all of the MTRR state for this CPU into *state  */
304 void __init get_mtrr_state(void)
305 {
306 	unsigned int i;
307 	struct mtrr_var_range *vrs;
308 	unsigned lo, dummy;
309 	unsigned long flags;
310 
311 	vrs = mtrr_state.var_ranges;
312 
313 	rdmsr(MTRRcap_MSR, lo, dummy);
314 	mtrr_state.have_fixed = (lo >> 8) & 1;
315 
316 	for (i = 0; i < num_var_ranges; i++)
317 		get_mtrr_var_range(i, &vrs[i]);
318 	if (mtrr_state.have_fixed)
319 		get_fixed_ranges(mtrr_state.fixed_ranges);
320 
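	/*
	 * MTRRdefType: the low byte holds the default memory type, bit 10 is
	 * FE (fixed-range enable) and bit 11 is E (global MTRR enable); the
	 * two enable bits become bits 0 and 1 of mtrr_state.enabled.
	 */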
321 	rdmsr(MTRRdefType_MSR, lo, dummy);
322 	mtrr_state.def_type = (lo & 0xff);
323 	mtrr_state.enabled = (lo & 0xc00) >> 10;
324 
325 	if (amd_special_default_mtrr()) {
326 		unsigned low, high;
327 		/* TOP_MEM2 */
328 		rdmsr(MSR_K8_TOP_MEM2, low, high);
329 		mtrr_tom2 = high;
330 		mtrr_tom2 <<= 32;
331 		mtrr_tom2 |= low;
332 		mtrr_tom2 &= 0xffffff800000ULL;
333 	}
334 
335 	print_mtrr_state();
336 
337 	mtrr_state_set = 1;
338 
339 	/* PAT setup for the BP (boot processor). We need to go through the sync steps here */
340 	local_irq_save(flags);
341 	prepare_set();
342 
343 	pat_init();
344 
345 	post_set();
346 	local_irq_restore(flags);
347 
348 }
349 
350 /*  Some BIOSes are broken and don't set all MTRRs the same!  */
351 void __init mtrr_state_warn(void)
352 {
353 	unsigned long mask = smp_changes_mask;
354 
355 	if (!mask)
356 		return;
357 	if (mask & MTRR_CHANGE_MASK_FIXED)
358 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
359 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
360 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
361 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
362 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
363 	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
364 	printk(KERN_INFO "mtrr: corrected configuration.\n");
365 }
366 
367 /* Doesn't attempt to pass an error back to MTRR users because that is
368    quite complicated in some cases and probably not worth it: the best
369    error handling here is to log the failure and otherwise ignore it. */
370 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
371 {
372 	if (wrmsr_safe(msr, a, b) < 0)
373 		printk(KERN_ERR
374 			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
375 			smp_processor_id(), msr, a, b);
376 }
377 
378 /**
379  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
380  * @msr: MSR address of the MTRR which should be checked and updated
381  * @changed: pointer which indicates whether the MTRR needed to be changed
382  * @msrwords: pointer to the MSR values which the MSR should have
383  */
384 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
385 {
386 	unsigned lo, hi;
387 
388 	rdmsr(msr, lo, hi);
389 
390 	if (lo != msrwords[0] || hi != msrwords[1]) {
391 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
392 		*changed = true;
393 	}
394 }
395 
396 /**
397  * generic_get_free_region - Get a free MTRR.
398  * @base: The starting (base) address of the region.
399  * @size: The size (in bytes) of the region.
400  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
401  *
402  * Returns: The index of the region on success, else negative on error.
403  */
404 int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
405 {
406 	int i, max;
407 	mtrr_type ltype;
408 	unsigned long lbase, lsize;
409 
410 	max = num_var_ranges;
411 	if (replace_reg >= 0 && replace_reg < max)
412 		return replace_reg;
413 	for (i = 0; i < max; ++i) {
414 		mtrr_if->get(i, &lbase, &lsize, &ltype);
415 		if (lsize == 0)
416 			return i;
417 	}
418 	return -ENOSPC;
419 }
420 
421 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
422 			     unsigned long *size, mtrr_type *type)
423 {
424 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
425 	unsigned int tmp, hi;
426 	int cpu;
427 
428 	/*
429 	 * get_mtrr doesn't need to update mtrr_state, also it could be called
430 	 * from any cpu, so try to print it out directly.
431 	 */
432 	cpu = get_cpu();
433 
434 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
435 
436 	if ((mask_lo & 0x800) == 0) {
437 		/*  Invalid (i.e. free) range  */
438 		*base = 0;
439 		*size = 0;
440 		*type = 0;
441 		goto out_put_cpu;
442 	}
443 
444 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
445 
446 	/* Work out the shifted address mask: */
447 	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
448 	mask_lo = size_or_mask | tmp;
449 
450 	/* Expand tmp with high bits to all 1s: */
451 	hi = fls(tmp);
452 	if (hi > 0) {
453 		tmp |= ~((1<<(hi - 1)) - 1);
454 
455 		if (tmp != mask_lo) {
456 			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
457 			mask_lo = tmp;
458 		}
459 	}
460 
461 	/*
462 	 * This works correctly if size is a power of two, i.e. a
463 	 * contiguous range:
464 	 */
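	/* e.g. with PAGE_SHIFT == 12, a contiguous 64 MiB range covers 2^14
	   pages, so mask_lo == 0xffffc000 and -mask_lo == 0x4000 pages == 64 MiB. */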
465 	*size = -mask_lo;
466 	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
467 	*type = base_lo & 0xff;
468 
469 out_put_cpu:
470 	put_cpu();
471 }
472 
473 /**
474  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
475  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
476  */
477 static int set_fixed_ranges(mtrr_type * frs)
478 {
479 	unsigned long long *saved = (unsigned long long *) frs;
480 	bool changed = false;
481 	int block=-1, range;
482 
483 	k8_check_syscfg_dram_mod_en();
484 
485 	while (fixed_range_blocks[++block].ranges)
486 	    for (range=0; range < fixed_range_blocks[block].ranges; range++)
487 		set_fixed_range(fixed_range_blocks[block].base_msr + range,
488 		    &changed, (unsigned int *) saved++);
489 
490 	return changed;
491 }
492 
493 /*  Set the MSR pair relating to a var range. Returns TRUE if
494     changes are made  */
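/*
 * Only the architecturally meaningful bits are compared: the type byte and
 * base address bits of PHYSBASE, the valid bit and mask bits of PHYSMASK,
 * and only those high-word bits the CPU actually implements (size_and_mask).
 */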
495 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
496 {
497 	unsigned int lo, hi;
498 	bool changed = false;
499 
500 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
501 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
502 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
503 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
504 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
505 		changed = true;
506 	}
507 
508 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
509 
510 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
511 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
512 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
513 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
514 		changed = true;
515 	}
516 	return changed;
517 }
518 
519 static u32 deftype_lo, deftype_hi;
520 
521 /**
522  * set_mtrr_state - Set the MTRR state for this CPU.
523  *
524  * NOTE: The CPU must already be in a safe state for MTRR changes.
525  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
526  */
527 static unsigned long set_mtrr_state(void)
528 {
529 	unsigned int i;
530 	unsigned long change_mask = 0;
531 
532 	for (i = 0; i < num_var_ranges; i++)
533 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
534 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
535 
536 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
537 		change_mask |= MTRR_CHANGE_MASK_FIXED;
538 
539 	/*  Set_mtrr_restore restores the old value of MTRRdefType,
540 	   so to set it we fiddle with the saved value  */
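	/*
	 * 0xcff covers the default-type byte (bits 0-7) and the FE/E enable
	 * bits (bits 10-11) of MTRRdefType.
	 */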
541 	if ((deftype_lo & 0xff) != mtrr_state.def_type
542 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
543 		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
544 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
545 	}
546 
547 	return change_mask;
548 }
549 
550 
551 static unsigned long cr4 = 0;
552 static DEFINE_SPINLOCK(set_atomicity_lock);
553 
554 /*
555  * Since we are disabling the cache, don't allow any interrupts; they
556  * would run extremely slowly and would only increase the pain.  The caller must
557  * ensure that local interrupts are disabled and are re-enabled after post_set()
558  * has been called.
559  */
560 
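/*
 * prepare_set()/post_set() follow the usual MTRR update sequence: enter
 * no-fill cache mode (CR0.CD=1, NW=0), flush caches, disable PGE and flush
 * the TLB, disable the MTRRs, make the change, then undo those steps in
 * reverse order.
 */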
561 static void prepare_set(void) __acquires(set_atomicity_lock)
562 {
563 	unsigned long cr0;
564 
565 	/*  Note that this is not ideal, since the cache is only flushed/disabled
566 	   for this CPU while the MTRRs are changed, but changing this requires
567 	   more invasive changes to the way the kernel boots  */
568 
569 	spin_lock(&set_atomicity_lock);
570 
571 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
572 	cr0 = read_cr0() | X86_CR0_CD;
573 	write_cr0(cr0);
574 	wbinvd();
575 
576 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
577 	if ( cpu_has_pge ) {
578 		cr4 = read_cr4();
579 		write_cr4(cr4 & ~X86_CR4_PGE);
580 	}
581 
582 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
583 	__flush_tlb();
584 
585 	/*  Save MTRR state */
586 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
587 
588 	/*  Disable MTRRs, and set the default type to uncached  */
589 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
590 }
591 
592 static void post_set(void) __releases(set_atomicity_lock)
593 {
594 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
595 	__flush_tlb();
596 
597 	/* Intel (P6) standard MTRRs */
598 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
599 
600 	/*  Enable caches  */
601 	write_cr0(read_cr0() & 0xbfffffff);
602 
603 	/*  Restore value of CR4  */
604 	if ( cpu_has_pge )
605 		write_cr4(cr4);
606 	spin_unlock(&set_atomicity_lock);
607 }
608 
609 static void generic_set_all(void)
610 {
611 	unsigned long mask, count;
612 	unsigned long flags;
613 
614 	local_irq_save(flags);
615 	prepare_set();
616 
617 	/* Actually set the state */
618 	mask = set_mtrr_state();
619 
620 	/* also set PAT */
621 	pat_init();
622 
623 	post_set();
624 	local_irq_restore(flags);
625 
626 	/*  Use the atomic bitops to update the global mask  */
627 	for (count = 0; count < sizeof mask * 8; ++count) {
628 		if (mask & 0x01)
629 			set_bit(count, &smp_changes_mask);
630 		mask >>= 1;
631 	}
632 
633 }
634 
635 static void generic_set_mtrr(unsigned int reg, unsigned long base,
636 			     unsigned long size, mtrr_type type)
637 /*  [SUMMARY] Set variable MTRR register on the local CPU.
638     <reg> The register to set.
639     <base> The base address of the region.
640     <size> The size of the region. If this is 0 the region is disabled.
641     <type> The type of the region.
642     [RETURNS] Nothing.
643 */
644 {
645 	unsigned long flags;
646 	struct mtrr_var_range *vr;
647 
648 	vr = &mtrr_state.var_ranges[reg];
649 
650 	local_irq_save(flags);
651 	prepare_set();
652 
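	/*
	 * base and size are in pages.  A zero size disables the range by
	 * clearing its mask register; otherwise the base/mask MSR pair is
	 * rebuilt: for a power-of-two size, -size gives the standard
	 * contiguous MTRR mask, and bit 11 (0x800) marks the range valid.
	 */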
653 	if (size == 0) {
654 		/* The valid bit lives in the mask register, so simply clearing
655 		   the whole mask register disables the range. */
656 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
657 		memset(vr, 0, sizeof(struct mtrr_var_range));
658 	} else {
659 		vr->base_lo = base << PAGE_SHIFT | type;
660 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
661 		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
662 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
663 
664 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
665 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
666 	}
667 
668 	post_set();
669 	local_irq_restore(flags);
670 }
671 
672 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
673 {
674 	unsigned long lbase, last;
675 
676 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
677 	    and not touch 0x70000000->0x7003FFFF */
678 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
679 	    boot_cpu_data.x86_model == 1 &&
680 	    boot_cpu_data.x86_mask <= 7) {
681 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
682 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
683 			return -EINVAL;
684 		}
685 		if (!(base + size < 0x70000 || base > 0x7003F) &&
686 		    (type == MTRR_TYPE_WRCOMB
687 		     || type == MTRR_TYPE_WRBACK)) {
688 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
689 			return -EINVAL;
690 		}
691 	}
692 
693 	/*  Check that the upper bits of base and last are equal and that the
694 	    lower bits are 0 for base and 1 for last  */
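	/*  e.g. base == 0x4000 pages with size == 0x4000 pages gives
	    last == 0x7fff; stripping the trailing 0/1 bits leaves
	    lbase == last == 1, so the region is size-aligned.  */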
695 	last = base + size - 1;
696 	for (lbase = base; !(lbase & 1) && (last & 1);
697 	     lbase = lbase >> 1, last = last >> 1) ;
698 	if (lbase != last) {
699 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
700 		       base, size);
701 		return -EINVAL;
702 	}
703 	return 0;
704 }
705 
706 
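/* Bit 10 (WC) of MTRRcap reports whether the write-combining type is supported. */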
707 static int generic_have_wrcomb(void)
708 {
709 	unsigned long config, dummy;
710 	rdmsr(MTRRcap_MSR, config, dummy);
711 	return (config & (1 << 10));
712 }
713 
714 int positive_have_wrcomb(void)
715 {
716 	return 1;
717 }
718 
719 /* Generic (Intel-compatible) MTRR ops structure.
720  */
721 struct mtrr_ops generic_mtrr_ops = {
722 	.use_intel_if      = 1,
723 	.set_all	   = generic_set_all,
724 	.get               = generic_get_mtrr,
725 	.get_free_region   = generic_get_free_region,
726 	.set               = generic_set_mtrr,
727 	.validate_add_page = generic_validate_add_page,
728 	.have_wrcomb       = generic_have_wrcomb,
729 };
730