/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct fixed_range_block {
	int base_msr; /* start address of an MTRR block */
	int ranges;   /* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
	{}
};
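
/*
 * For reference: each fixed-range MTRR packs eight one-byte memory types,
 * so the three blocks above describe 11 MSRs / 88 type entries in total:
 *   MTRRfix64K_00000          0x00000-0x7FFFF in eight 64K chunks
 *   MTRRfix16K_80000/_A0000   0x80000-0xBFFFF in sixteen 16K chunks
 *   MTRRfix4K_C0000.._F8000   0xC0000-0xFFFFF in sixty-four 4K chunks
 * mtrr_state.fixed_ranges[] mirrors this layout; e.g. the type for
 * 0xA0000 lives at index 1 * 8 + ((0xA0000 - 0x80000) >> 14) = 16.
 */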

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);

/**
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/*
 * Returns the effective MTRR type for the region
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}
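
/*
 * Worked example for mtrr_type_lookup() above, using made-up register
 * values: assume one enabled variable MTRR with base 0xC0000000, type
 * write-back, and a mask covering 256MB (0xFF0000000 on a 36-bit part).
 * A query for [0xC0000000, 0xC0100000) matches only that range and yields
 * MTRR_TYPE_WRBACK; a query for [0xCFF00000, 0xD0100000) crosses the end
 * of the range, so start and end disagree and 0xFE is returned; and if a
 * second, overlapping range were of type uncachable, the precedence rules
 * in the loop would make MTRR_TYPE_UNCACHABLE win.
 */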

/*  Get the MSR pair relating to a var range  */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/*  Fill the MSR pair relating to a var range  */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	k8_check_syscfg_dram_mod_en();

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}
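
/*
 * Note on the indexing above: each rdmsr() fills two 32-bit words, i.e.
 * eight mtrr_type bytes, so p[0..1] hold the 64K MSR, p[2..5] the two
 * 16K MSRs and p[6..21] the eight 4K MSRs - 88 bytes in total, matching
 * the fixed_range_blocks[] table.
 */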

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	printk(KERN_DEBUG "  %05X-%05X %s\n", last_fixed_start,
		last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				       mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}

static void __init print_fixed(unsigned base, unsigned step,
			       const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}
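
/*
 * print_fixed() above coalesces adjacent entries of the same type, so a
 * BIOS that sets, say, 0xC0000-0xC7FFF to write-protect and the rest of
 * the 4K area to uncachable is reported as two ranges rather than one
 * line per 4K entry.
 */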

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	printk(KERN_DEBUG "MTRR default type: %s\n",
			 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n",
		       mtrr_state.enabled & 1 ? "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
	       mtrr_state.enabled & 2 ? "en" : "dis");
	high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			printk(KERN_DEBUG "  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
			       i,
			       high_width,
			       mtrr_state.var_ranges[i].base_hi,
			       mtrr_state.var_ranges[i].base_lo >> 12,
			       high_width,
			       mtrr_state.var_ranges[i].mask_hi,
			       mtrr_state.var_ranges[i].mask_lo >> 12,
			       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			printk(KERN_DEBUG "  %u disabled\n", i);
	}
	if (mtrr_tom2) {
		printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
				  mtrr_tom2, mtrr_tom2>>20);
	}
}
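
/*
 * The high_width computation above sizes the printed address field from
 * size_or_mask (set up at boot, in mtrr_bp_init()): e.g. with 36-bit
 * physical addresses size_or_mask is 0xff000000, ffs() returns 25, and
 * high_width becomes (24 - 20 + 3) / 4 = 1, so bases and masks print as
 * 1 + 5 + 3 = 9 hex digits, i.e. 36 bits.
 */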

/*  Grab all of the MTRR state for this CPU into *state  */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);

}

/*  Some BIOSes are broken and don't set all MTRRs the same!  */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it; the best error handling is to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;
	int cpu;

	/*
	 * get_mtrr doesn't need to update mtrr_state; it can also be called
	 * from any cpu, so try to print the warning out directly.
	 */
	cpu = get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range  */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}
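
/*
 * Example of the mask-to-size conversion above, assuming PAGE_SHIFT == 12
 * and a 36-bit part (size_or_mask == 0xff000000): a 256MB region has
 * PhysMask hi:lo = 0xf:0xf0000800, so tmp = 0x00f00000 | 0x000f0000 =
 * 0x00ff0000, mask_lo = 0xffff0000 and *size = -mask_lo = 0x10000 pages,
 * i.e. 256MB.
 */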

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();

	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}
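
/*
 * The unsigned long long cast above works because each fixed-range MSR
 * holds exactly eight mtrr_type bytes: advancing saved by one 64-bit word
 * per inner iteration walks the 88-byte fixed_ranges[] array in lockstep
 * with the MSR list in fixed_range_blocks[].
 */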

/*  Set the MSR pair relating to a var range. Returns TRUE if
    changes are made  */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*  Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value  */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}
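
/*
 * The ~0xcff mask above clears the low type byte (bits 7:0 as the code
 * treats it) together with the fixed-range-enable and MTRR-enable bits
 * (10 and 11) - exactly the state mirrored in mtrr_state.def_type and the
 * two-bit mtrr_state.enabled field.
 */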


static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts; they
 * would run extremely slowly and would only increase the pain.  The caller
 * must ensure that local interrupts are disabled and are reenabled after
 * post_set() has been called.
 */

static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*  Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots  */

	spin_lock(&set_atomicity_lock);

	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/*  Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Disable MTRRs, and set the default type to uncached  */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/*  Flush TLBs (no need to flush caches - they are disabled)  */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/*  Enable caches  */
	write_cr0(read_cr0() & 0xbfffffff);

	/*  Restore value of CR4  */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}
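
/*
 * Taken together, prepare_set()/post_set() follow the recommended safe
 * sequence for changing MTRRs (and PAT): disable and flush the caches,
 * flush the TLB, disable MTRRs, let the caller write the new values, then
 * flush again and re-enable everything in reverse order, all with local
 * interrupts off.
 */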

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/*  Use the atomic bitops to update the global mask  */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/*  [SUMMARY] Set variable MTRR register on the local CPU.
    <reg> The register to set.
    <base> The base address of the region.
    <size> The size of the region. If this is 0 the region is disabled.
    <type> The type of the region.
    [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}
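
/*
 * Encoding example for the else-branch above (base and size are in 4K
 * pages; values assume PAGE_SHIFT == 12 and a 36-bit part, i.e.
 * size_and_mask == 0x00f00000): a 256MB write-back region at 3GB has
 * base = 0xC0000, size = 0x10000, so PhysBase becomes 0x0:0xC0000006 and
 * PhysMask 0xf:0xf0000800 - bit 11 is the valid bit and the mask selects
 * the 256MB-aligned block containing the base.
 */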

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
	    and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*  Check upper bits of base and last are equal and lower bits are 0
	    for base and 1 for last  */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}
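
/*
 * Example of the alignment loop above (base/size in 4K pages): for a 1MB
 * region at 1MB (base 0x100, size 0x100) last is 0x1ff; shifting out the
 * eight low bits leaves lbase == last == 1, so the region is accepted.
 * The same region at 1.5MB (base 0x180) stops with lbase != last and is
 * rejected, since it is not aligned on its own size.
 */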


static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all	   = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};