// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

static bool is_mtrr_base_msr(unsigned int msr)
{
	/* MTRR base MSRs use even numbers, masks use odd numbers. */
	return !(msr & 0x1);
}

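/*
 * Variable-range MTRRs come in (base, mask) MSR pairs: MTRRphysBase_MSR(n)
 * and MTRRphysMask_MSR(n) are adjacent, so halving the offset from
 * MTRRphysBase_MSR(0) yields the range index.  Illustrative example,
 * assuming the architectural numbering (MTRRphysBase_MSR(0) == 0x200):
 * msr == 0x203 is MTRRphysMask_MSR(1) and maps to var_ranges[1].
 */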
static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
						    unsigned int msr)
{
	int index = (msr - MTRRphysBase_MSR(0)) / 2;

	return &vcpu->arch.mtrr_state.var_ranges[index];
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

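/*
 * 0x73 selects bits 0, 1, 4, 5 and 6, i.e. the architecturally defined
 * memory types UC (0), WC (1), WT (4), WP (5) and WB (6); types 2, 3 and
 * 7 are reserved.
 */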
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= MTRRphysBase_MSR(0) &&
		  msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1)));

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;

	return (data & mask) == 0;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC memory
	 * type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory, so use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment, a contiguous address region covered by fixed MTRRs of the
 *   same granularity.
 * - unit, a single MSR entry within a segment.
 * - range, a sub-range of a unit covered by one memory cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
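
/*
 * Together the three segments cover the low 1MB: 8 * 64K + 16 * 16K +
 * 64 * 4K ranges, i.e. kvm_mtrr.fixed_ranges[] needs 8 + 16 + 64 = 88
 * entries.
 */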

/*
 * Each unit corresponds to one MSR; one MSR entry contains 8 ranges, so
 * the unit size is always 8 * 2^range_shift.
 */
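/* E.g. seg 0: 8 << 16 == 512K, i.e. one MSR covers [0x0, 0x80000). */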
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}
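
/*
 * Illustrative example, assuming the architectural MSR numbering
 * (MSR_MTRRfix4K_C0000 == 0x268): MSR_MTRRfix4K_D0000 gives seg = 2 and
 * unit = 2, so the unit covers [0xd0000, 0xd8000) and starts at
 * fixed_ranges[24 + 8 * 2] = fixed_ranges[40].
 */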

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

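/*
 * The index of the last range in a segment; e.g. (from fixed_seg_table)
 * for seg 2: 24 + (0x40000 >> 12) - 1 = 87, the final fixed range.
 */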
static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/*
	 * This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
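
/*
 * Illustrative example, assuming a 36-bit guest physical address width:
 * for a 1GB range at 2GB the guest writes base = 0x80000000 | type and
 * mask = 0xfc0000000 | (1 << 11).  set_var_mtrr_msr() below ORs the
 * reserved GPA bits into the stored mask, so mask = 0xffffffffc0000000
 * and *end = (0x80000000 | 0x3fffffff) + 1 = 0xc0000000.
 */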

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		var_mtrr_range(var_mtrr_msr_to_range(vcpu, msr), &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

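/* Bit 11 of the mask MSR is the 'V' (valid) flag for the variable range. */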
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;

	cur = var_mtrr_msr_to_range(vcpu, msr);

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&cur->node);

	/*
	 * Set all illegal GPA bits in the mask, since those bits must
	 * implicitly be 0.  The bits are then cleared when reading them.
	 */
	if (is_mtrr_base_msr(msr))
		cur->base = data;
	else
		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a read-only MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR (bit 11) = 0
		 * WC   (bit 10) = 1
		 * FIX  (bit  8) = 1
		 * VCNT (bits 7:0) = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		if (is_mtrr_base_msr(msr))
			*pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
		else
			*pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;

		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	}

	return 0;
}

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* is MTRR completely disabled? */
	bool mtrr_disabled;
	/* is [start, end) not fully covered by MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* the maximum address covered by var MTRRs so far. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head,
		 * so @range is the range with the lowest base address
		 * that overlaps [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the maximum address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* all fixed MTRRs have been looked up. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

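/*
 * Walk every memory type that applies to [_gpa_start_, _gpa_end_): fixed
 * MTRRs first when they cover the start of the range, then variable MTRRs
 * in order of base address.  Each iteration publishes one memory type via
 * _iter_->mem_type.
 */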
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We checked a single page, so it is impossible for it to be
	 * partially covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}
729