xref: /openbmc/linux/arch/x86/kvm/mtrr.c (revision 74e6a79f)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

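/*
 * Return true if @msr is one of the MTRR MSRs emulated here: a variable MTRR
 * base/mask pair (MSRs 0x200 through 0x200 + 2 * KVM_NR_VAR_MTRR - 1), a
 * fixed-range MTRR, MTRRdefType or PAT.
 */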
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

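/*
 * Valid memory type encodings are UC (0), WC (1), WT (4), WP (5) and WB (6);
 * the encodings 2, 3 and 7 are reserved.
 */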
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

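/*
 * Check that @data is a legal value for @msr: the memory type encodings must
 * be valid and no reserved bits may be set.
 */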
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		return kvm_pat_valid(data);
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;

	return (data & mask) == 0;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs, and since running the guest entirely with UC
	 * memory is clearly undesirable, we use WB instead.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
 * Three terms are used in the following code:
 * - segment: one of the address regions covered by fixed MTRRs.
 * - unit: one MSR entry within a segment.
 * - range: a sub-region of a unit that is covered by a single memory
 *   cache type.
 */
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K */
		.range_start = 24,
	}
};

/*
 * The size of the address range covered by one MSR (one unit).  Each MSR
 * entry contains 8 ranges, so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

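/*
 * Map a fixed-range MTRR MSR to the segment it belongs to and to its unit
 * (i.e. the MSR's index) within that segment.
 */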
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix16K_80000,
			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = array_index_nospec(
			msr - MSR_MTRRfix4K_C0000,
			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
		break;
	default:
		return false;
	}

	return true;
}

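/*
 * Compute the [start, end) physical address range covered by one unit of a
 * fixed MTRR segment.
 */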
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}

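/*
 * Compute the [start, end) physical address range described by a variable
 * MTRR base/mask pair.
 */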
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/*
	 * This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
}

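/*
 * An MTRR (or MTRRdefType) update can change the effective memory type of
 * guest pages, so zap the affected GFN range and let the mappings be rebuilt
 * with the new type.  This only matters with TDP and when the VM has
 * noncoherent DMA, i.e. when KVM honors the guest memory type.
 */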
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	gfn_t start, end;
	int index;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
		return;

	/* fixed MTRRs. */
	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
			return;
	} else if (msr == MSR_MTRRdefType) {
		start = 0x0;
		end = ~0ULL;
	} else {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
	}

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

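/* Bit 11 of the mask MSR is the V (valid) flag that enables the range. */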
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
{
	return (range->mask & (1 << 11)) != 0;
}

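/*
 * Update a variable MTRR base or mask MSR and keep kvm_mtrr.head as a list
 * of the enabled ranges, sorted by base address.
 */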
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/*
	 * Set all illegal GPA bits in the mask, since those bits must
	 * implicitly be 0.  The bits are then cleared when reading them.
	 */
	if (!is_mtrr_mask)
		cur->base = data;
	else
		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
				break;
		list_add_tail(&cur->node, &tmp->node);
	}
}

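/*
 * Handle a write to an MTRR (or PAT) MSR.  Returns 1 if the value is invalid
 * so that the caller can reject the write, 0 on success.
 */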
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int index;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	int index;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	if (!msr_mtrr_valid(msr))
		return 1;

	index = fixed_msr_to_range_index(msr);
	if (index >= 0)
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int is_mtrr_mask;

		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
		if (!is_mtrr_mask)
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
		else
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	}

	return 0;
}

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
{
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

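/*
 * Iterator that visits every MTRR overlapping [start, end), walking the
 * fixed ranges first and then the enabled variable ranges, and reporting the
 * memory type of each match.
 */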
struct mtrr_iter {
	/* input fields. */
	struct kvm_mtrr *mtrr_state;
	u64 start;
	u64 end;

	/* output fields. */
	int mem_type;
	/* mtrr is completely disabled? */
	bool mtrr_disabled;
	/* [start, end) is not fully covered in MTRRs? */
	bool partial_map;

	/* private fields. */
	union {
		/* used for fixed MTRRs. */
		struct {
			int index;
			int seg;
		};

		/* used for var MTRRs. */
		struct {
			struct kvm_mtrr_range *range;
			/* the maximum address already covered by var MTRRs. */
			u64 start_max;
		};
	};

	bool fixed;
};

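/*
 * Start the walk in the fixed MTRRs if they are enabled and iter->start
 * falls within the fixed range area.
 */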
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
{
	int seg, index;

	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
		return false;

	seg = fixed_mtrr_addr_to_seg(iter->start);
	if (seg < 0)
		return false;

	iter->fixed = true;
	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
	iter->index = index;
	iter->seg = seg;
	return true;
}

static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
{
	u64 start, end;

	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		iter->range = range;

		/*
		 * This function is called while walking kvm_mtrr.head.
		 * 'range' is the entry with the lowest base address that
		 * overlaps [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the maximum address that has been covered. */
		iter->start_max = max(iter->start_max, end);
		return true;
	}

	return false;
}

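/*
 * Advance to the next variable MTRR that overlaps [iter->start, iter->end);
 * once no more ranges match, record whether part of the region was left
 * uncovered.
 */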
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))
			return;

	iter->range = NULL;
	iter->partial_map |= iter->start_max < iter->end;
}

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
{
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->fixed = false;
	iter->start_max = iter->start;
	iter->range = NULL;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
{
	/* terminate the lookup. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
		iter->fixed = false;
		iter->range = NULL;
		return;
	}

	iter->index++;

	/* all fixed MTRRs have been looked up. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
		iter->seg++;
}

static void mtrr_lookup_var_next(struct mtrr_iter *iter)
{
	__mtrr_lookup_var_next(iter);
}

static void mtrr_lookup_start(struct mtrr_iter *iter)
{
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->mtrr_disabled = true;
		return;
	}

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);
}

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
{
	iter->mtrr_state = mtrr_state;
	iter->start = start;
	iter->end = end;
	iter->mtrr_disabled = false;
	iter->partial_map = false;
	iter->fixed = false;
	iter->range = NULL;

	mtrr_lookup_start(iter);
}

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
{
	if (iter->fixed) {
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
		return true;
	}

	if (iter->range) {
		iter->mem_type = iter->range->base & 0xff;
		return true;
	}

	return false;
}

static void mtrr_lookup_next(struct mtrr_iter *iter)
{
	if (iter->fixed)
		mtrr_lookup_fixed_next(iter);
	else
		mtrr_lookup_var_next(iter);
}

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))

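/*
 * Return the effective memory type of the guest page at @gfn, resolving
 * overlapping MTRRs according to the precedence rules below (identical types
 * combine, UC wins, WT beats WB).
 */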
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;
	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;

		/*
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
		 * Precedences.
		 */

		if (type == -1) {
			type = curr_type;
			continue;
		}

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		 * used.
		 */
		if (type == curr_type)
			continue;

		/*
		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		 */
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		/*
		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		 */
		if (((1 << type) & wt_wb_mask) &&
		      ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;
			continue;
		}

		/*
		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.
		 */

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	}

	if (iter.mtrr_disabled)
		return mtrr_disabled_type(vcpu);

	/* not contained in any MTRRs. */
	if (type == -1)
		return mtrr_default_type(mtrr_state);

	/*
	 * We only checked one page, so it is impossible for it to be
	 * partially covered by MTRRs.
	 */
	WARN_ON(iter.partial_map);

	return type;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);

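/*
 * Check whether every MTRR that overlaps [gfn, gfn + page_num) reports the
 * same memory type, i.e. whether the whole range can be treated uniformly.
 */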
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
{
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;
	u64 start, end;
	int type = -1;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		if (type == -1) {
			type = iter.mem_type;
			continue;
		}

		if (type != iter.mem_type)
			return false;
	}

	if (iter.mtrr_disabled)
		return true;

	if (!iter.partial_map)
		return true;

	if (type == -1)
		return true;

	return type == mtrr_default_type(mtrr_state);
}