/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

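/*
 * Reset the guest MSR for interrupt delivery. The new MSR is built from
 * vcpu->arch.intr_msr; if the guest was in a transaction, the
 * transactional state bits (MSR[TS]) are carried over so the transaction
 * ends up suspended, mirroring what hardware does on interrupt entry.
 */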
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;
	unsigned long cur_msr = kvmppc_get_msr(vcpu);

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(cur_msr))
		msr |= MSR_TS_S;
	else
		msr |= cur_msr & MSR_TS_MASK;

	kvmppc_set_msr(vcpu, msr);
}

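/*
 * Look up the guest SLB entry covering an effective address. 1T-segment
 * entries (tb set) are matched against the 1T ESID, all others against
 * the 256M ESID. Returns NULL if no valid entry matches.
 */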
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb    ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

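/*
 * Segment geometry helpers: a 1T segment uses SID_SHIFT_1T (2^40 bytes),
 * a 256M segment uses SID_SHIFT (2^28 bytes). The offset mask selects the
 * byte offset of an address within its segment.
 */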
static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

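/*
 * Convert an MMU_PAGE_* constant to the log2 of the page size in bytes:
 * 16 for 64K pages, 24 for 16M pages, and 12 (4K) for everything else.
 */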
static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

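/*
 * Compute the host address of the PTEG that may hold the translation for
 * eaddr. SDR1 supplies the hash table base and size; the hash is derived
 * from the VPN (complemented for the secondary PTEG) and masked to the
 * table size. Each PTEG is 8 HPTEs of 16 bytes, hence the shift by 7.
 */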
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
		vcpu_book3s->sdr1, pteg, slbe->vsid);

	/*
	 * When running a PAPR guest, SDR1 contains a HVA address
	 * instead of a GPA.
	 */
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

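/*
 * Build the abbreviated VPN (AVPN) used to match the first HPTE
 * doubleword: the VSID combined with the page number, then shifted so it
 * lines up with the HPTE_V_AVPN field. The shift amount works out to
 * 16 - p for page sizes below 64K, as the inline comment notes.
 */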
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}

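/*
 * Translate a guest effective address to a guest real address by walking
 * the guest hash page table, mimicking the hardware walk: check the magic
 * page override, find the SLB entry, search the primary and then the
 * secondary PTEG for a matching HPTE, derive the access permissions from
 * the PP bits and key, and update the guest R and C bits on access.
 */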
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;
		gpte->wimg = HPTE_R_M;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		u64 pte0 = be64_to_cpu(pteg[i]);
		u64 pte1 = be64_to_cpu(pteg[i + 1]);

		/* Check all relevant fields of 1st dword */
		if ((pte0 & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pte1);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	v = be64_to_cpu(pteg[i]);
	r = be64_to_cpu(pteg[i + 1]);
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	if (unlikely(vcpu->arch.disable_kernel_nx) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR))
		gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/*
	 * Update PTE R and C bits, so the guest's swapper knows we used the
	 * page.
	 */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));

		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag, again with a single byte write */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));

		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

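/*
 * Emulate slbmte: decode the ESID, index and valid bit from RB and the
 * VSID and flags from RS, fill in the guest SLB entry, and eagerly map
 * the new segment in the shadow MMU.
 */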
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	/* Valid SLB indices are 0 .. slb_nr - 1 */
	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
				       ulong *ret_slb)
{
	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);

	if (slbe) {
		*ret_slb = slbe->origv;
		return 0;
	}
	*ret_slb = 0;
	return -ENOENT;
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

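/*
 * Emulate slbie: invalidate the SLB entry matching the effective address
 * and flush the corresponding segment from the shadow MMU, using the
 * entry's segment size (256M or 1T) to pick the flush range.
 */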
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

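/*
 * Emulate slbia: invalidate all SLB entries except entry 0, like the
 * hardware instruction, which leaves entry 0 alone. If instruction
 * relocation is on, rebuild the shadow segment for the current PC.
 */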
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000|| 0b0		VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6.  POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif

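/*
 * Translate a guest ESID to the VSID used for the shadow MMU. With
 * address translation off, a VSID_REAL-tagged identity VSID is used;
 * with it on, the VSID comes from the guest SLB. Additional flag bits
 * (VSID_PR, VSID_64K) keep shadow entries for different contexts apart.
 */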
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

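/*
 * Wire up the Book3S 64-bit software MMU: install the emulation callbacks
 * on the vcpu's MMU ops and flag the vcpu as using an SLB rather than
 * 32-bit segment registers.
 */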
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}
698