xref: /openbmc/linux/arch/x86/kernel/cpu/mtrr/generic.c (revision 22246614)
1 /* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
2    because MTRRs can span up to 40 bits (36 bits on most modern x86) */
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <asm/io.h>
8 #include <asm/mtrr.h>
9 #include <asm/msr.h>
10 #include <asm/system.h>
11 #include <asm/cpufeature.h>
12 #include <asm/processor-flags.h>
13 #include <asm/tlbflush.h>
14 #include <asm/pat.h>
15 #include "mtrr.h"
16 
17 struct mtrr_state {
18 	struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
19 	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
20 	unsigned char enabled;
21 	unsigned char have_fixed;
22 	mtrr_type def_type;
23 };
24 
25 struct fixed_range_block {
26 	int base_msr; /* start address of an MTRR block */
27 	int ranges;   /* number of MTRRs in this block  */
28 };
29 
30 static struct fixed_range_block fixed_range_blocks[] = {
31 	{ MTRRfix64K_00000_MSR, 1 }, /* one  64k MTRR  */
32 	{ MTRRfix16K_80000_MSR, 2 }, /* two  16k MTRRs */
33 	{ MTRRfix4K_C0000_MSR,  8 }, /* eight 4k MTRRs */
34 	{}
35 };
36 
37 static unsigned long smp_changes_mask;
38 static struct mtrr_state mtrr_state = {};
39 static int mtrr_state_set;
40 static u64 tom2;
41 
42 #undef MODULE_PARAM_PREFIX
43 #define MODULE_PARAM_PREFIX "mtrr."
44 
45 static int mtrr_show;
46 module_param_named(show, mtrr_show, bool, 0);
47 
48 /*
49  * Returns the effective MTRR type for the region
50  * Error returns:
51  * - 0xFE - when the range is only partially covered by a variable-range MTRR
52  * - 0xFF - when MTRRs are not enabled
53  */
54 u8 mtrr_type_lookup(u64 start, u64 end)
55 {
56 	int i;
57 	u64 base, mask;
58 	u8 prev_match, curr_match;
59 
60 	if (!mtrr_state_set)
61 		return 0xFF;
62 
63 	if (!mtrr_state.enabled)
64 		return 0xFF;
65 
66 	/* Make end inclusive, instead of exclusive */
67 	end--;
68 
69 	/* Look in fixed ranges. Just return the type as per start */
70 	if (mtrr_state.have_fixed && (start < 0x100000)) {
71 		int idx;
72 
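		/*
		 * The 88 fixed-range entries cover the first 1MB: eight 64K
		 * entries for 0x00000-0x7FFFF, sixteen 16K entries for
		 * 0x80000-0xBFFFF and sixty-four 4K entries for 0xC0000-0xFFFFF.
		 * The index arithmetic below picks the entry containing start.
		 */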
73 		if (start < 0x80000) {
74 			idx = 0;
75 			idx += (start >> 16);
76 			return mtrr_state.fixed_ranges[idx];
77 		} else if (start < 0xC0000) {
78 			idx = 1 * 8;
79 			idx += ((start - 0x80000) >> 14);
80 			return mtrr_state.fixed_ranges[idx];
81 		} else if (start < 0x1000000) {
82 			idx = 3 * 8;
83 			idx += ((start - 0xC0000) >> 12);
84 			return mtrr_state.fixed_ranges[idx];
85 		}
86 	}
87 
88 	/*
89 	 * Look in variable ranges.
90 	 * Look for multiple ranges matching this address and pick the type
91 	 * as per MTRR precedence.
92 	 */
93 	if (!(mtrr_state.enabled & 2)) {
94 		return mtrr_state.def_type;
95 	}
96 
97 	prev_match = 0xFF;
98 	for (i = 0; i < num_var_ranges; ++i) {
99 		unsigned short start_state, end_state;
100 
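		/* Bit 11 of the mask MSR is the valid flag; skip disabled ranges. */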
101 		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
102 			continue;
103 
104 		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
105 		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
106 		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
107 		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
108 
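		/*
		 * Check whether start and end fall on the same side of this
		 * MTRR; if only one of them matches, the region straddles the
		 * MTRR boundary and its type is not uniform.
		 */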
109 		start_state = ((start & mask) == (base & mask));
110 		end_state = ((end & mask) == (base & mask));
111 		if (start_state != end_state)
112 			return 0xFE;
113 
114 		if ((start & mask) != (base & mask)) {
115 			continue;
116 		}
117 
118 		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
119 		if (prev_match == 0xFF) {
120 			prev_match = curr_match;
121 			continue;
122 		}
123 
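		/*
		 * More than one variable MTRR covers this region: resolve the
		 * types per the architectural precedence rules below (UC wins,
		 * WB combined with WT yields WT, any other mix is UC).
		 */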
124 		if (prev_match == MTRR_TYPE_UNCACHABLE ||
125 		    curr_match == MTRR_TYPE_UNCACHABLE) {
126 			return MTRR_TYPE_UNCACHABLE;
127 		}
128 
129 		if ((prev_match == MTRR_TYPE_WRBACK &&
130 		     curr_match == MTRR_TYPE_WRTHROUGH) ||
131 		    (prev_match == MTRR_TYPE_WRTHROUGH &&
132 		     curr_match == MTRR_TYPE_WRBACK)) {
133 			prev_match = MTRR_TYPE_WRTHROUGH;
134 			curr_match = MTRR_TYPE_WRTHROUGH;
135 		}
136 
137 		if (prev_match != curr_match) {
138 			return MTRR_TYPE_UNCACHABLE;
139 		}
140 	}
141 
142 	if (tom2) {
143 		if (start >= (1ULL<<32) && (end < tom2))
144 			return MTRR_TYPE_WRBACK;
145 	}
146 
147 	if (prev_match != 0xFF)
148 		return prev_match;
149 
150 	return mtrr_state.def_type;
151 }
152 
153 /*  Get the MSR pair relating to a var range  */
154 static void
155 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
156 {
157 	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
158 	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
159 }
160 
161 static void
162 get_fixed_ranges(mtrr_type *frs)
163 {
164 	unsigned int *p = (unsigned int *) frs;
165 	int i;
166 
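	/*
	 * Each fixed-range MSR holds eight one-byte MTRR types; read each
	 * MSR as two 32-bit halves into the 88-entry type array.
	 */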
167 	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
168 
169 	for (i = 0; i < 2; i++)
170 		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
171 	for (i = 0; i < 8; i++)
172 		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
173 }
174 
175 void mtrr_save_fixed_ranges(void *info)
176 {
177 	if (cpu_has_mtrr)
178 		get_fixed_ranges(mtrr_state.fixed_ranges);
179 }
180 
181 static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
182 {
183 	unsigned i;
184 
185 	for (i = 0; i < 8; ++i, ++types, base += step)
186 		printk(KERN_INFO "MTRR %05X-%05X %s\n",
187 			base, base + step - 1, mtrr_attrib_to_str(*types));
188 }
189 
190 static void prepare_set(void);
191 static void post_set(void);
192 
193 /*  Grab all of the MTRR state for this CPU into *state  */
194 void __init get_mtrr_state(void)
195 {
196 	unsigned int i;
197 	struct mtrr_var_range *vrs;
198 	unsigned lo, dummy;
199 	unsigned long flags;
200 
201 	vrs = mtrr_state.var_ranges;
202 
203 	rdmsr(MTRRcap_MSR, lo, dummy);
204 	mtrr_state.have_fixed = (lo >> 8) & 1;
205 
206 	for (i = 0; i < num_var_ranges; i++)
207 		get_mtrr_var_range(i, &vrs[i]);
208 	if (mtrr_state.have_fixed)
209 		get_fixed_ranges(mtrr_state.fixed_ranges);
210 
211 	rdmsr(MTRRdefType_MSR, lo, dummy);
212 	mtrr_state.def_type = (lo & 0xff);
213 	mtrr_state.enabled = (lo & 0xc00) >> 10;
214 
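	/*
	 * On AMD CPUs with the special default MTRR setup, memory between
	 * 4GB and TOP_MEM2 is treated as write-back; remember the boundary
	 * so mtrr_type_lookup() can honour it.
	 */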
215 	if (amd_special_default_mtrr()) {
216 		unsigned lo, hi;
217 		/* TOP_MEM2 */
218 		rdmsr(MSR_K8_TOP_MEM2, lo, hi);
219 		tom2 = hi;
220 		tom2 <<= 32;
221 		tom2 |= lo;
222 		tom2 &= 0xffffff8000000ULL;
223 	}
224 	if (mtrr_show) {
225 		int high_width;
226 
227 		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
228 		if (mtrr_state.have_fixed) {
229 			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
230 			       mtrr_state.enabled & 1 ? "en" : "dis");
231 			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
232 			for (i = 0; i < 2; ++i)
233 				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
234 			for (i = 0; i < 8; ++i)
235 				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
236 		}
237 		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
238 		       mtrr_state.enabled & 2 ? "en" : "dis");
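		/*
		 * Number of hex digits needed for the physical-address bits
		 * above bit 31; feeds the %0*X width in the printk below.
		 */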
239 		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
240 		for (i = 0; i < num_var_ranges; ++i) {
241 			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
242 				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
243 				       i,
244 				       high_width,
245 				       mtrr_state.var_ranges[i].base_hi,
246 				       mtrr_state.var_ranges[i].base_lo >> 12,
247 				       high_width,
248 				       mtrr_state.var_ranges[i].mask_hi,
249 				       mtrr_state.var_ranges[i].mask_lo >> 12,
250 				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
251 			else
252 				printk(KERN_INFO "MTRR %u disabled\n", i);
253 		}
254 		if (tom2) {
255 			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
256 					  tom2, tom2>>20);
257 		}
258 	}
259 	mtrr_state_set = 1;
260 
261 	/* PAT setup for the boot processor. We need to go through the sync steps here */
262 	local_irq_save(flags);
263 	prepare_set();
264 
265 	pat_init();
266 
267 	post_set();
268 	local_irq_restore(flags);
269 
270 }
271 
272 /*  Some BIOSes are broken and don't set all MTRRs the same!  */
273 void __init mtrr_state_warn(void)
274 {
275 	unsigned long mask = smp_changes_mask;
276 
277 	if (!mask)
278 		return;
279 	if (mask & MTRR_CHANGE_MASK_FIXED)
280 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
281 	if (mask & MTRR_CHANGE_MASK_VARIABLE)
282 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
283 	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
284 		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
285 	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
286 	printk(KERN_INFO "mtrr: corrected configuration.\n");
287 }
288 
289 /* Doesn't attempt to pass an error out to MTRR users
290    because it's quite complicated in some cases and probably not
291    worth it: the best error handling is to ignore the failure. */
292 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
293 {
294 	if (wrmsr_safe(msr, a, b) < 0)
295 		printk(KERN_ERR
296 			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
297 			smp_processor_id(), msr, a, b);
298 }
299 
300 /**
301  * k8_enable_fixed_iorrs - enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
302  * See AMD publication no. 24593, chapter 3.2.1 for more information.
303  */
304 static inline void k8_enable_fixed_iorrs(void)
305 {
306 	unsigned lo, hi;
307 
308 	rdmsr(MSR_K8_SYSCFG, lo, hi);
309 	mtrr_wrmsr(MSR_K8_SYSCFG, lo
310 				| K8_MTRRFIXRANGE_DRAM_ENABLE
311 				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
312 }
313 
314 /**
315  * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
316  * @msr: MSR address of the MTRR which should be checked and updated
317  * @changed: pointer which indicates whether the MTRR needed to be changed
318  * @msrwords: pointer to the MSR values which the MSR should have
319  *
320  * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
321  * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
322  */
323 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
324 {
325 	unsigned lo, hi;
326 
327 	rdmsr(msr, lo, hi);
328 
329 	if (lo != msrwords[0] || hi != msrwords[1]) {
330 		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
331 		    boot_cpu_data.x86 == 15 &&
332 		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
333 			k8_enable_fixed_iorrs();
334 		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
335 		*changed = true;
336 	}
337 }
338 
339 /**
340  * generic_get_free_region - Get a free MTRR.
341  * @base: The starting (base) address of the region.
342  * @size: The size (in bytes) of the region.
343  * @replace_reg: mtrr index to be replaced; set to invalid value if none.
344  *
345  * Returns: The index of the region on success, else negative on error.
346  */
347 int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
348 {
349 	int i, max;
350 	mtrr_type ltype;
351 	unsigned long lbase, lsize;
352 
353 	max = num_var_ranges;
354 	if (replace_reg >= 0 && replace_reg < max)
355 		return replace_reg;
356 	for (i = 0; i < max; ++i) {
357 		mtrr_if->get(i, &lbase, &lsize, &ltype);
358 		if (lsize == 0)
359 			return i;
360 	}
361 	return -ENOSPC;
362 }
363 
364 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
365 			     unsigned long *size, mtrr_type *type)
366 {
367 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
368 
369 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
370 	if ((mask_lo & 0x800) == 0) {
371 		/*  Invalid (i.e. free) range  */
372 		*base = 0;
373 		*size = 0;
374 		*type = 0;
375 		return;
376 	}
377 
378 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
379 
380 	/* Work out the shifted address mask. */
381 	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
382 	    | mask_lo >> PAGE_SHIFT;
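	/*
	 * size_or_mask forces the bits above the CPU's physical address
	 * width to 1, so negating the mask below yields the size in pages.
	 */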
383 
384 	/* This works correctly if size is a power of two, i.e. a
385 	   contiguous range. */
386 	*size = -mask_lo;
387 	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
388 	*type = base_lo & 0xff;
389 }
390 
391 /**
392  * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
393  * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
394  */
395 static int set_fixed_ranges(mtrr_type *frs)
396 {
397 	unsigned long long *saved = (unsigned long long *) frs;
398 	bool changed = false;
399 	int block = -1, range;
400 
401 	while (fixed_range_blocks[++block].ranges)
402 	    for (range = 0; range < fixed_range_blocks[block].ranges; range++)
403 		set_fixed_range(fixed_range_blocks[block].base_msr + range,
404 		    &changed, (unsigned int *) saved++);
405 
406 	return changed;
407 }
408 
409 /*  Set the MSR pair relating to a var range. Returns true if
410     changes are made  */
411 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
412 {
413 	unsigned int lo, hi;
414 	bool changed = false;
415 
416 	rdmsr(MTRRphysBase_MSR(index), lo, hi);
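	/*
	 * Compare only the type field (bits 7:0) and the base-address bits
	 * (31:12), ignoring reserved bits 11:8; the high word is masked to
	 * the CPU's physical address width.
	 */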
417 	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
418 	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
419 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
420 		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
421 		changed = true;
422 	}
423 
424 	rdmsr(MTRRphysMask_MSR(index), lo, hi);
425 
426 	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
427 	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
428 		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
429 		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
430 		changed = true;
431 	}
432 	return changed;
433 }
434 
435 static u32 deftype_lo, deftype_hi;
436 
437 /**
438  * set_mtrr_state - Set the MTRR state for this CPU.
439  *
440  * NOTE: The CPU must already be in a safe state for MTRR changes.
441  * RETURNS: 0 if no changes made, else a mask indicating what was changed.
442  */
443 static unsigned long set_mtrr_state(void)
444 {
445 	unsigned int i;
446 	unsigned long change_mask = 0;
447 
448 	for (i = 0; i < num_var_ranges; i++)
449 		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
450 			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
451 
452 	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
453 		change_mask |= MTRR_CHANGE_MASK_FIXED;
454 
455 	/*  post_set() will restore the saved value of MTRRdefType,
456 	   so to change it we fiddle with the saved value here  */
457 	if ((deftype_lo & 0xff) != mtrr_state.def_type
458 	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
459 		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
460 		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
461 	}
462 
463 	return change_mask;
464 }
465 
466 
467 static unsigned long cr4;
468 static DEFINE_SPINLOCK(set_atomicity_lock);
469 
470 /*
471  * Since we are disabling the cache don't allow any interrupts - they
472  * would run extremely slow and would only increase the pain.  The caller must
473  * ensure that local interrupts are disabled and are reenabled after post_set()
474  * has been called.
475  */
476 
477 static void prepare_set(void) __acquires(set_atomicity_lock)
478 {
479 	unsigned long cr0;
480 
481 	/*  Note that this is not ideal, since the cache is only flushed/disabled
482 	   for this CPU while the MTRRs are changed, but changing this requires
483 	   more invasive changes to the way the kernel boots  */
484 
485 	spin_lock(&set_atomicity_lock);
486 
487 	/*  Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
488 	cr0 = read_cr0() | X86_CR0_CD;
489 	write_cr0(cr0);
490 	wbinvd();
491 
492 	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
493 	if (cpu_has_pge) {
494 		cr4 = read_cr4();
495 		write_cr4(cr4 & ~X86_CR4_PGE);
496 	}
497 
498 	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
499 	__flush_tlb();
500 
501 	/*  Save MTRR state */
502 	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
503 
504 	/*  Disable MTRRs, and set the default type to uncached  */
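	/*  ~0xcff clears the E/FE enable bits (11:10) and the default-type field (7:0)  */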
505 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
506 }
507 
508 static void post_set(void) __releases(set_atomicity_lock)
509 {
510 	/*  Flush TLBs (no need to flush caches - they are disabled)  */
511 	__flush_tlb();
512 
513 	/* Intel (P6) standard MTRRs */
514 	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
515 
516 	/*  Enable caches  */
517 	write_cr0(read_cr0() & 0xbfffffff);
518 
519 	/*  Restore value of CR4  */
520 	if (cpu_has_pge)
521 		write_cr4(cr4);
522 	spin_unlock(&set_atomicity_lock);
523 }
524 
525 static void generic_set_all(void)
526 {
527 	unsigned long mask, count;
528 	unsigned long flags;
529 
530 	local_irq_save(flags);
531 	prepare_set();
532 
533 	/* Actually set the state */
534 	mask = set_mtrr_state();
535 
536 	/* also set PAT */
537 	pat_init();
538 
539 	post_set();
540 	local_irq_restore(flags);
541 
542 	/*  Use the atomic bitops to update the global mask  */
543 	for (count = 0; count < sizeof mask * 8; ++count) {
544 		if (mask & 0x01)
545 			set_bit(count, &smp_changes_mask);
546 		mask >>= 1;
547 	}
548 
549 }
550 
551 /**
552  * generic_set_mtrr - set variable MTRR register on the local CPU.
553  * @reg: The register to set.
554  * @base: The base address of the region.
555  * @size: The size of the region. If this is 0 the region is disabled.
556  * @type: The type of the region.
557  */
558 static void generic_set_mtrr(unsigned int reg, unsigned long base,
559 			     unsigned long size, mtrr_type type)
560 {
561 	unsigned long flags;
562 	struct mtrr_var_range *vr;
563 
564 	vr = &mtrr_state.var_ranges[reg];
565 
566 	local_irq_save(flags);
567 	prepare_set();
568 
569 	if (size == 0) {
570 		/* The invalid bit is kept in the mask, so we simply clear the
571 		   relevant mask register to disable a range. */
572 		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
573 		memset(vr, 0, sizeof(struct mtrr_var_range));
574 	} else {
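		/*
		 * base and size are in pages; for a power-of-two size, -size
		 * gives the address mask, and 0x800 sets the valid bit in the
		 * mask MSR.
		 */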
575 		vr->base_lo = base << PAGE_SHIFT | type;
576 		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
577 		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
578 		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
579 
580 		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
581 		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
582 	}
583 
584 	post_set();
585 	local_irq_restore(flags);
586 }
587 
588 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
589 {
590 	unsigned long lbase, last;
591 
592 	/*  For Intel PPro stepping <= 7, must be 4 MiB aligned
593 	    and not touch 0x70000000->0x7003FFFF */
594 	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
595 	    boot_cpu_data.x86_model == 1 &&
596 	    boot_cpu_data.x86_mask <= 7) {
597 		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
598 			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
599 			return -EINVAL;
600 		}
601 		if (!(base + size < 0x70000 || base > 0x7003F) &&
602 		    (type == MTRR_TYPE_WRCOMB
603 		     || type == MTRR_TYPE_WRBACK)) {
604 			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
605 			return -EINVAL;
606 		}
607 	}
608 
609 	/*  Check upper bits of base and last are equal and lower bits are 0
610 	    for base and 1 for last  */
611 	last = base + size - 1;
612 	for (lbase = base; !(lbase & 1) && (last & 1);
613 	     lbase = lbase >> 1, last = last >> 1) ;
614 	if (lbase != last) {
615 		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
616 		       base, size);
617 		return -EINVAL;
618 	}
619 	return 0;
620 }
621 
622 
623 static int generic_have_wrcomb(void)
624 {
625 	unsigned long config, dummy;
626 	rdmsr(MTRRcap_MSR, config, dummy);
627 	return (config & (1 << 10));
628 }
629 
630 int positive_have_wrcomb(void)
631 {
632 	return 1;
633 }
634 
635 /* Generic (Intel-style) MTRR operations.
636  */
637 struct mtrr_ops generic_mtrr_ops = {
638 	.use_intel_if      = 1,
639 	.set_all	   = generic_set_all,
640 	.get               = generic_get_mtrr,
641 	.get_free_region   = generic_get_free_region,
642 	.set               = generic_set_mtrr,
643 	.validate_add_page = generic_validate_add_page,
644 	.have_wrcomb       = generic_have_wrcomb,
645 };
646