xref: /openbmc/linux/arch/x86/kvm/mtrr.c (revision 9ae38b4fb13597ce1821376d23958bbe4976c759)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

static bool is_mtrr_base_msr(unsigned int msr)
{
	/* MTRR base MSRs use even numbers, masks use odd numbers. */
	return !(msr & 0x1);
}

static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
						    unsigned int msr)
{
	int index = (msr - 0x200) / 2;

	return &vcpu->arch.mtrr_state.var_ranges[index];
}
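
/*
 * A worked example of the mapping above (illustrative, using the
 * architectural variable-range MSR numbering): MSRs 0x200 and 0x201 are
 * IA32_MTRR_PHYSBASE0 and IA32_MTRR_PHYSMASK0 and both map to
 * var_ranges[0]; MSR 0x203 (IA32_MTRR_PHYSMASK1) yields index
 * (0x203 - 0x200) / 2 = 1, i.e. the mask of var_ranges[1].
 */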

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

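/*
 * For reference (Intel SDM memory type encodings): 0 = UC, 1 = WC,
 * 4 = WT, 5 = WP, 6 = WB; values 2, 3 and 7 are reserved.  0x73 below
 * is the bitmap of these five valid types.
 */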
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;

	return (data & mask) == 0;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
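
/*
 * A worked example of the variable-MTRR checks above, assuming a guest
 * with a 36-bit physical address width: writing
 * data = 0x0000000080000006 (WB at 2 GiB) to a base MSR is accepted,
 * while data = 0x0000000080000106 is rejected because reserved bits
 * 11:8 must be zero.  For a mask MSR, bits 10:0 are reserved and
 * bit 11 is the valid (V) bit.
 */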

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when the
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC memory type
	 * is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs, and running the guest entirely with UC memory
	 * would be undesirable, so we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment: one of the address segments covered by the fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: a block of addresses covered by a single memory cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};
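
/*
 * Illustrative example of the segment layout above: address 0xa4000
 * falls in the 16K segment (0x80000 - 0xc0000), so its fixed range
 * index is 8 + ((0xa4000 - 0x80000) >> 14) = 8 + 9 = 17.
 */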

/*
 * One MSR covers exactly one unit, and each MSR entry contains
 * 8 ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}
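
/*
 * For example (illustrative): segment 0 has range_shift = 16, so its
 * single unit spans 8 << 16 = 512K, exactly 0x0 - 0x80000; segment 2
 * has range_shift = 12, so each of its 8 units spans 8 << 12 = 32K.
 */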

static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}
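
/*
 * For example (illustrative): MSR_MTRRfix4K_D0000 decodes to seg = 2,
 * unit = 2, i.e. the 32K unit covering 0xd0000 - 0xd8000.
 */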

static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}
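
/*
 * Sanity check by example (illustrative): segment 1 spans 0x80000 -
 * 0xc0000 in 16K ranges, so n = 0x40000 >> 14 = 16 and its last range
 * index is 8 + 16 - 1 = 23, just before segment 2's range_start of 24.
 */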

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}
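
/*
 * For example (illustrative): seg = 0, index = 7 (the last 64K range)
 * gives an end address of 0x0 + (8 << 16) = 0x80000, the segment end.
 */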

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/*
	 * This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}
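
/*
 * Worked example (illustrative), assuming a 36-bit physical address
 * width: the guest writes base = 0xc0000006 (WB at 3 GiB) and
 * mask = 0xff0000800 (valid bit set, 256 MiB region).  KVM stores the
 * mask with the reserved GPA bits (63:36) set, so mask & PAGE_MASK is
 * 0xfffffffff0000000 and ~mask is 0xfffffff; thus start = 0xc0000000
 * and end = (0xc0000000 | 0xfffffff) + 1 = 0xd0000000.
 */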

static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		var_mtrr_range(var_mtrr_msr_to_range(vcpu, msr), &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;

	cur = var_mtrr_msr_to_range(vcpu, msr);

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&cur->node);

	/*
	 * Set all illegal GPA bits in the mask, since those bits must
	 * implicitly be 0.  The bits are then cleared when reading them.
	 */
	if (is_mtrr_base_msr(msr))
		cur->base = data;
	else
		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a read-only MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		if (is_mtrr_base_msr(msr))
			*pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
		else
			*pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;

		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	}

	return 0;
}
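
/*
 * For example (illustrative): with KVM_NR_VAR_MTRR = 8, a guest RDMSR
 * of MSR_MTRRcap returns 0x508, i.e. WC and FIX supported and a VCNT
 * of 8 variable ranges.
 */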

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* is mtrr completely disabled? */
	bool mtrr_disabled;
	/* is [start, end) not fully covered by MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* the maximum address covered by var MTRRs so far. */
			u64 start_max;
		};
	};

	bool fixed;
};

static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head,
		 * so 'range' has the minimum base address that overlaps
		 * [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the maximum address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* all fixed MTRRs have been looked up. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to the next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))

u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked a single page, so it cannot be partially
	 * covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
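
/*
 * A worked precedence example (illustrative): if a page is covered by
 * one variable range of type WB and another of type WT, the WT/WB rule
 * above yields WT; if any matching range is UC, UC wins immediately.
 */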

bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}
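
/*
 * Usage note (an assumption based on this file alone): callers such as
 * the MMU fault path can use this helper to check that a candidate
 * huge-page range has one consistent guest memory type before mapping
 * it with a large page.
 */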