xref: /openbmc/linux/arch/powerpc/kvm/book3s_hv_rm_mmu.c (revision b240b419db5d624ce7a5a397d6f62a1a686009ec)
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
7  */
8 
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/kvm.h>
12 #include <linux/kvm_host.h>
13 #include <linux/hugetlb.h>
14 #include <linux/module.h>
15 #include <linux/log2.h>
16 
17 #include <asm/tlbflush.h>
18 #include <asm/trace.h>
19 #include <asm/kvm_ppc.h>
20 #include <asm/kvm_book3s.h>
21 #include <asm/book3s/64/mmu-hash.h>
22 #include <asm/hvcall.h>
23 #include <asm/synch.h>
24 #include <asm/ppc-opcode.h>
25 #include <asm/pte-walk.h>
26 
27 /* Translate address of a vmalloc'd thing to a linear map address */
28 static void *real_vmalloc_addr(void *x)
29 {
30 	unsigned long addr = (unsigned long) x;
31 	pte_t *p;
32 	/*
33 	 * assume we don't have huge pages in vmalloc space...
34 	 * So don't worry about THP collapse/split. Called
35 	 * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
36 	 */
37 	p = find_init_mm_pte(addr, NULL);
38 	if (!p || !pte_present(*p))
39 		return NULL;
40 	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
41 	return __va(addr);
42 }
43 
44 /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
45 static int global_invalidates(struct kvm *kvm)
46 {
47 	int global;
48 	int cpu;
49 
50 	/*
51 	 * If there is only one vcore, and it's currently running,
52 	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
53 	 * we can use tlbiel as long as we mark all other physical
54 	 * cores as potentially having stale TLB entries for this lpid.
55 	 * Otherwise, don't use tlbiel.
56 	 */
57 	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
58 		global = 0;
59 	else
60 		global = 1;
61 
62 	if (!global) {
63 		/* any other core might now have stale TLB entries... */
64 		smp_wmb();
65 		cpumask_setall(&kvm->arch.need_tlb_flush);
66 		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
67 		/*
68 		 * On POWER9, threads are independent but the TLB is shared,
69 		 * so use the bit for the first thread to represent the core.
70 		 */
71 		if (cpu_has_feature(CPU_FTR_ARCH_300))
72 			cpu = cpu_first_thread_sibling(cpu);
73 		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
74 	}
75 
76 	return global;
77 }
78 
79 /*
80  * Add this HPTE into the chain for the real page.
81  * Must be called with the chain locked; it unlocks the chain.
82  */
83 void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
84 			     unsigned long *rmap, long pte_index, int realmode)
85 {
86 	struct revmap_entry *head, *tail;
87 	unsigned long i;
88 
89 	if (*rmap & KVMPPC_RMAP_PRESENT) {
90 		i = *rmap & KVMPPC_RMAP_INDEX;
91 		head = &kvm->arch.hpt.rev[i];
92 		if (realmode)
93 			head = real_vmalloc_addr(head);
94 		tail = &kvm->arch.hpt.rev[head->back];
95 		if (realmode)
96 			tail = real_vmalloc_addr(tail);
97 		rev->forw = i;
98 		rev->back = head->back;
99 		tail->forw = pte_index;
100 		head->back = pte_index;
101 	} else {
102 		rev->forw = rev->back = pte_index;
103 		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
104 			pte_index | KVMPPC_RMAP_PRESENT;
105 	}
106 	unlock_rmap(rmap);
107 }
108 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
109 
110 /* Update the dirty bitmap of a memslot */
111 void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
112 			     unsigned long gfn, unsigned long psize)
113 {
114 	unsigned long npages;
115 
116 	if (!psize || !memslot->dirty_bitmap)
117 		return;
118 	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
119 	gfn -= memslot->base_gfn;
120 	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
121 }
122 EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);
123 
124 static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
125 				unsigned long hpte_v, unsigned long hpte_gr)
126 {
127 	struct kvm_memory_slot *memslot;
128 	unsigned long gfn;
129 	unsigned long psize;
130 
131 	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
132 	gfn = hpte_rpn(hpte_gr, psize);
133 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
134 	if (memslot && memslot->dirty_bitmap)
135 		kvmppc_update_dirty_map(memslot, gfn, psize);
136 }
137 
138 /* Returns a pointer to the revmap entry for the page mapped by a HPTE */
139 static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
140 				      unsigned long hpte_gr,
141 				      struct kvm_memory_slot **memslotp,
142 				      unsigned long *gfnp)
143 {
144 	struct kvm_memory_slot *memslot;
145 	unsigned long *rmap;
146 	unsigned long gfn;
147 
148 	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
149 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
150 	if (memslotp)
151 		*memslotp = memslot;
152 	if (gfnp)
153 		*gfnp = gfn;
154 	if (!memslot)
155 		return NULL;
156 
157 	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
158 	return rmap;
159 }
160 
161 /* Remove this HPTE from the chain for a real page */
162 static void remove_revmap_chain(struct kvm *kvm, long pte_index,
163 				struct revmap_entry *rev,
164 				unsigned long hpte_v, unsigned long hpte_r)
165 {
166 	struct revmap_entry *next, *prev;
167 	unsigned long ptel, head;
168 	unsigned long *rmap;
169 	unsigned long rcbits;
170 	struct kvm_memory_slot *memslot;
171 	unsigned long gfn;
172 
173 	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
174 	ptel = rev->guest_rpte |= rcbits;
175 	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
176 	if (!rmap)
177 		return;
178 	lock_rmap(rmap);
179 
180 	head = *rmap & KVMPPC_RMAP_INDEX;
181 	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
182 	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
183 	next->back = rev->back;
184 	prev->forw = rev->forw;
185 	if (head == pte_index) {
186 		head = rev->forw;
187 		if (head == pte_index)
188 			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
189 		else
190 			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
191 	}
192 	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
193 	if (rcbits & HPTE_R_C)
194 		kvmppc_update_dirty_map(memslot, gfn,
195 					kvmppc_actual_pgsz(hpte_v, hpte_r));
196 	unlock_rmap(rmap);
197 }
198 
199 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
200 		       long pte_index, unsigned long pteh, unsigned long ptel,
201 		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
202 {
203 	unsigned long i, pa, gpa, gfn, psize;
204 	unsigned long slot_fn, hva;
205 	__be64 *hpte;
206 	struct revmap_entry *rev;
207 	unsigned long g_ptel;
208 	struct kvm_memory_slot *memslot;
209 	unsigned hpage_shift;
210 	bool is_ci;
211 	unsigned long *rmap;
212 	pte_t *ptep;
213 	unsigned int writing;
214 	unsigned long mmu_seq;
215 	unsigned long rcbits, irq_flags = 0;
216 
217 	if (kvm_is_radix(kvm))
218 		return H_FUNCTION;
219 	psize = kvmppc_actual_pgsz(pteh, ptel);
220 	if (!psize)
221 		return H_PARAMETER;
222 	writing = hpte_is_writable(ptel);
223 	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
224 	ptel &= ~HPTE_GR_RESERVED;
225 	g_ptel = ptel;
226 
227 	/* used later to detect if we might have been invalidated */
228 	mmu_seq = kvm->mmu_notifier_seq;
229 	smp_rmb();
230 
231 	/* Find the memslot (if any) for this address */
232 	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
233 	gfn = gpa >> PAGE_SHIFT;
234 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
235 	pa = 0;
236 	is_ci = false;
237 	rmap = NULL;
238 	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
239 		/* Emulated MMIO - mark this with key=31 */
240 		pteh |= HPTE_V_ABSENT;
241 		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
242 		goto do_insert;
243 	}
244 
245 	/* Check if the requested page fits entirely in the memslot. */
246 	if (!slot_is_aligned(memslot, psize))
247 		return H_PARAMETER;
248 	slot_fn = gfn - memslot->base_gfn;
249 	rmap = &memslot->arch.rmap[slot_fn];
250 
251 	/* Translate to host virtual address */
252 	hva = __gfn_to_hva_memslot(memslot, gfn);
253 	/*
254 	 * If we had a page table table change after lookup, we would
255 	 * retry via mmu_notifier_retry.
256 	 */
257 	if (!realmode)
258 		local_irq_save(irq_flags);
259 	/*
260 	 * If called in real mode we have MSR_EE = 0. Otherwise
261 	 * we disable irq above.
262 	 */
263 	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
264 	if (ptep) {
265 		pte_t pte;
266 		unsigned int host_pte_size;
267 
268 		if (hpage_shift)
269 			host_pte_size = 1ul << hpage_shift;
270 		else
271 			host_pte_size = PAGE_SIZE;
272 		/*
273 		 * We should always find the guest page size
274 		 * to <= host page size, if host is using hugepage
275 		 */
276 		if (host_pte_size < psize) {
277 			if (!realmode)
278 				local_irq_restore(flags);
279 			return H_PARAMETER;
280 		}
281 		pte = kvmppc_read_update_linux_pte(ptep, writing);
282 		if (pte_present(pte) && !pte_protnone(pte)) {
283 			if (writing && !__pte_write(pte))
284 				/* make the actual HPTE be read-only */
285 				ptel = hpte_make_readonly(ptel);
286 			is_ci = pte_ci(pte);
287 			pa = pte_pfn(pte) << PAGE_SHIFT;
288 			pa |= hva & (host_pte_size - 1);
289 			pa |= gpa & ~PAGE_MASK;
290 		}
291 	}
292 	if (!realmode)
293 		local_irq_restore(irq_flags);
294 
295 	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
296 	ptel |= pa;
297 
298 	if (pa)
299 		pteh |= HPTE_V_VALID;
300 	else {
301 		pteh |= HPTE_V_ABSENT;
302 		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
303 	}
304 
305 	/*If we had host pte mapping then  Check WIMG */
306 	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
307 		if (is_ci)
308 			return H_PARAMETER;
309 		/*
310 		 * Allow guest to map emulated device memory as
311 		 * uncacheable, but actually make it cacheable.
312 		 */
313 		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
314 		ptel |= HPTE_R_M;
315 	}
316 
317 	/* Find and lock the HPTEG slot to use */
318  do_insert:
319 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
320 		return H_PARAMETER;
321 	if (likely((flags & H_EXACT) == 0)) {
322 		pte_index &= ~7UL;
323 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
324 		for (i = 0; i < 8; ++i) {
325 			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
326 			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
327 					  HPTE_V_ABSENT))
328 				break;
329 			hpte += 2;
330 		}
331 		if (i == 8) {
332 			/*
333 			 * Since try_lock_hpte doesn't retry (not even stdcx.
334 			 * failures), it could be that there is a free slot
335 			 * but we transiently failed to lock it.  Try again,
336 			 * actually locking each slot and checking it.
337 			 */
338 			hpte -= 16;
339 			for (i = 0; i < 8; ++i) {
340 				u64 pte;
341 				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
342 					cpu_relax();
343 				pte = be64_to_cpu(hpte[0]);
344 				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
345 					break;
346 				__unlock_hpte(hpte, pte);
347 				hpte += 2;
348 			}
349 			if (i == 8)
350 				return H_PTEG_FULL;
351 		}
352 		pte_index += i;
353 	} else {
354 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
355 		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
356 				   HPTE_V_ABSENT)) {
357 			/* Lock the slot and check again */
358 			u64 pte;
359 
360 			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
361 				cpu_relax();
362 			pte = be64_to_cpu(hpte[0]);
363 			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
364 				__unlock_hpte(hpte, pte);
365 				return H_PTEG_FULL;
366 			}
367 		}
368 	}
369 
370 	/* Save away the guest's idea of the second HPTE dword */
371 	rev = &kvm->arch.hpt.rev[pte_index];
372 	if (realmode)
373 		rev = real_vmalloc_addr(rev);
374 	if (rev) {
375 		rev->guest_rpte = g_ptel;
376 		note_hpte_modification(kvm, rev);
377 	}
378 
379 	/* Link HPTE into reverse-map chain */
380 	if (pteh & HPTE_V_VALID) {
381 		if (realmode)
382 			rmap = real_vmalloc_addr(rmap);
383 		lock_rmap(rmap);
384 		/* Check for pending invalidations under the rmap chain lock */
385 		if (mmu_notifier_retry(kvm, mmu_seq)) {
386 			/* inval in progress, write a non-present HPTE */
387 			pteh |= HPTE_V_ABSENT;
388 			pteh &= ~HPTE_V_VALID;
389 			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
390 			unlock_rmap(rmap);
391 		} else {
392 			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
393 						realmode);
394 			/* Only set R/C in real HPTE if already set in *rmap */
395 			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
396 			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
397 		}
398 	}
399 
400 	/* Convert to new format on P9 */
401 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
402 		ptel = hpte_old_to_new_r(pteh, ptel);
403 		pteh = hpte_old_to_new_v(pteh);
404 	}
405 	hpte[1] = cpu_to_be64(ptel);
406 
407 	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
408 	eieio();
409 	__unlock_hpte(hpte, pteh);
410 	asm volatile("ptesync" : : : "memory");
411 
412 	*pte_idx_ret = pte_index;
413 	return H_SUCCESS;
414 }
415 EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
416 
417 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
418 		    long pte_index, unsigned long pteh, unsigned long ptel)
419 {
420 	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
421 				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
422 }
423 
424 #ifdef __BIG_ENDIAN__
425 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
426 #else
427 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
428 #endif
429 
430 static inline int is_mmio_hpte(unsigned long v, unsigned long r)
431 {
432 	return ((v & HPTE_V_ABSENT) &&
433 		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
434 		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
435 }
436 
437 static inline int try_lock_tlbie(unsigned int *lock)
438 {
439 	unsigned int tmp, old;
440 	unsigned int token = LOCK_TOKEN;
441 
442 	asm volatile("1:lwarx	%1,0,%2\n"
443 		     "	cmpwi	cr0,%1,0\n"
444 		     "	bne	2f\n"
445 		     "  stwcx.	%3,0,%2\n"
446 		     "	bne-	1b\n"
447 		     "  isync\n"
448 		     "2:"
449 		     : "=&r" (tmp), "=&r" (old)
450 		     : "r" (lock), "r" (token)
451 		     : "cc", "memory");
452 	return old == 0;
453 }
454 
455 static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
456 		      long npages, int global, bool need_sync)
457 {
458 	long i;
459 
460 	/*
461 	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
462 	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
463 	 * the RS field, this is backwards-compatible with P7 and P8.
464 	 */
465 	if (global) {
466 		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
467 			cpu_relax();
468 		if (need_sync)
469 			asm volatile("ptesync" : : : "memory");
470 		for (i = 0; i < npages; ++i) {
471 			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
472 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
473 			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
474 				kvm->arch.lpid, 0, 0, 0);
475 		}
476 
477 		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
478 			/*
479 			 * Need the extra ptesync to make sure we don't
480 			 * re-order the tlbie
481 			 */
482 			asm volatile("ptesync": : :"memory");
483 			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
484 				     "r" (rbvalues[0]), "r" (kvm->arch.lpid));
485 		}
486 
487 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
488 		kvm->arch.tlbie_lock = 0;
489 	} else {
490 		if (need_sync)
491 			asm volatile("ptesync" : : : "memory");
492 		for (i = 0; i < npages; ++i) {
493 			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
494 				     "r" (rbvalues[i]), "r" (0));
495 			trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
496 				0, 0, 0, 0);
497 		}
498 		asm volatile("ptesync" : : : "memory");
499 	}
500 }
501 
502 long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
503 			unsigned long pte_index, unsigned long avpn,
504 			unsigned long *hpret)
505 {
506 	__be64 *hpte;
507 	unsigned long v, r, rb;
508 	struct revmap_entry *rev;
509 	u64 pte, orig_pte, pte_r;
510 
511 	if (kvm_is_radix(kvm))
512 		return H_FUNCTION;
513 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
514 		return H_PARAMETER;
515 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
516 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
517 		cpu_relax();
518 	pte = orig_pte = be64_to_cpu(hpte[0]);
519 	pte_r = be64_to_cpu(hpte[1]);
520 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
521 		pte = hpte_new_to_old_v(pte, pte_r);
522 		pte_r = hpte_new_to_old_r(pte_r);
523 	}
524 	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
525 	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
526 	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
527 		__unlock_hpte(hpte, orig_pte);
528 		return H_NOT_FOUND;
529 	}
530 
531 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
532 	v = pte & ~HPTE_V_HVLOCK;
533 	if (v & HPTE_V_VALID) {
534 		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
535 		rb = compute_tlbie_rb(v, pte_r, pte_index);
536 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
537 		/*
538 		 * The reference (R) and change (C) bits in a HPT
539 		 * entry can be set by hardware at any time up until
540 		 * the HPTE is invalidated and the TLB invalidation
541 		 * sequence has completed.  This means that when
542 		 * removing a HPTE, we need to re-read the HPTE after
543 		 * the invalidation sequence has completed in order to
544 		 * obtain reliable values of R and C.
545 		 */
546 		remove_revmap_chain(kvm, pte_index, rev, v,
547 				    be64_to_cpu(hpte[1]));
548 	}
549 	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
550 	note_hpte_modification(kvm, rev);
551 	unlock_hpte(hpte, 0);
552 
553 	if (is_mmio_hpte(v, pte_r))
554 		atomic64_inc(&kvm->arch.mmio_update);
555 
556 	if (v & HPTE_V_ABSENT)
557 		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
558 	hpret[0] = v;
559 	hpret[1] = r;
560 	return H_SUCCESS;
561 }
562 EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
563 
564 long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
565 		     unsigned long pte_index, unsigned long avpn)
566 {
567 	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
568 				  &vcpu->arch.gpr[4]);
569 }
570 
571 long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
572 {
573 	struct kvm *kvm = vcpu->kvm;
574 	unsigned long *args = &vcpu->arch.gpr[4];
575 	__be64 *hp, *hptes[4];
576 	unsigned long tlbrb[4];
577 	long int i, j, k, n, found, indexes[4];
578 	unsigned long flags, req, pte_index, rcbits;
579 	int global;
580 	long int ret = H_SUCCESS;
581 	struct revmap_entry *rev, *revs[4];
582 	u64 hp0, hp1;
583 
584 	if (kvm_is_radix(kvm))
585 		return H_FUNCTION;
586 	global = global_invalidates(kvm);
587 	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
588 		n = 0;
589 		for (; i < 4; ++i) {
590 			j = i * 2;
591 			pte_index = args[j];
592 			flags = pte_index >> 56;
593 			pte_index &= ((1ul << 56) - 1);
594 			req = flags >> 6;
595 			flags &= 3;
596 			if (req == 3) {		/* no more requests */
597 				i = 4;
598 				break;
599 			}
600 			if (req != 1 || flags == 3 ||
601 			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
602 				/* parameter error */
603 				args[j] = ((0xa0 | flags) << 56) + pte_index;
604 				ret = H_PARAMETER;
605 				break;
606 			}
607 			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
608 			/* to avoid deadlock, don't spin except for first */
609 			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
610 				if (n)
611 					break;
612 				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
613 					cpu_relax();
614 			}
615 			found = 0;
616 			hp0 = be64_to_cpu(hp[0]);
617 			hp1 = be64_to_cpu(hp[1]);
618 			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
619 				hp0 = hpte_new_to_old_v(hp0, hp1);
620 				hp1 = hpte_new_to_old_r(hp1);
621 			}
622 			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
623 				switch (flags & 3) {
624 				case 0:		/* absolute */
625 					found = 1;
626 					break;
627 				case 1:		/* andcond */
628 					if (!(hp0 & args[j + 1]))
629 						found = 1;
630 					break;
631 				case 2:		/* AVPN */
632 					if ((hp0 & ~0x7fUL) == args[j + 1])
633 						found = 1;
634 					break;
635 				}
636 			}
637 			if (!found) {
638 				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
639 				args[j] = ((0x90 | flags) << 56) + pte_index;
640 				continue;
641 			}
642 
643 			args[j] = ((0x80 | flags) << 56) + pte_index;
644 			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
645 			note_hpte_modification(kvm, rev);
646 
647 			if (!(hp0 & HPTE_V_VALID)) {
648 				/* insert R and C bits from PTE */
649 				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
650 				args[j] |= rcbits << (56 - 5);
651 				hp[0] = 0;
652 				if (is_mmio_hpte(hp0, hp1))
653 					atomic64_inc(&kvm->arch.mmio_update);
654 				continue;
655 			}
656 
657 			/* leave it locked */
658 			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
659 			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
660 			indexes[n] = j;
661 			hptes[n] = hp;
662 			revs[n] = rev;
663 			++n;
664 		}
665 
666 		if (!n)
667 			break;
668 
669 		/* Now that we've collected a batch, do the tlbies */
670 		do_tlbies(kvm, tlbrb, n, global, true);
671 
672 		/* Read PTE low words after tlbie to get final R/C values */
673 		for (k = 0; k < n; ++k) {
674 			j = indexes[k];
675 			pte_index = args[j] & ((1ul << 56) - 1);
676 			hp = hptes[k];
677 			rev = revs[k];
678 			remove_revmap_chain(kvm, pte_index, rev,
679 				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
680 			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
681 			args[j] |= rcbits << (56 - 5);
682 			__unlock_hpte(hp, 0);
683 		}
684 	}
685 
686 	return ret;
687 }
688 
689 long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
690 		      unsigned long pte_index, unsigned long avpn,
691 		      unsigned long va)
692 {
693 	struct kvm *kvm = vcpu->kvm;
694 	__be64 *hpte;
695 	struct revmap_entry *rev;
696 	unsigned long v, r, rb, mask, bits;
697 	u64 pte_v, pte_r;
698 
699 	if (kvm_is_radix(kvm))
700 		return H_FUNCTION;
701 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
702 		return H_PARAMETER;
703 
704 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
705 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
706 		cpu_relax();
707 	v = pte_v = be64_to_cpu(hpte[0]);
708 	if (cpu_has_feature(CPU_FTR_ARCH_300))
709 		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
710 	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
711 	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
712 		__unlock_hpte(hpte, pte_v);
713 		return H_NOT_FOUND;
714 	}
715 
716 	pte_r = be64_to_cpu(hpte[1]);
717 	bits = (flags << 55) & HPTE_R_PP0;
718 	bits |= (flags << 48) & HPTE_R_KEY_HI;
719 	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
720 
721 	/* Update guest view of 2nd HPTE dword */
722 	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
723 		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
724 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
725 	if (rev) {
726 		r = (rev->guest_rpte & ~mask) | bits;
727 		rev->guest_rpte = r;
728 		note_hpte_modification(kvm, rev);
729 	}
730 
731 	/* Update HPTE */
732 	if (v & HPTE_V_VALID) {
733 		/*
734 		 * If the page is valid, don't let it transition from
735 		 * readonly to writable.  If it should be writable, we'll
736 		 * take a trap and let the page fault code sort it out.
737 		 */
738 		r = (pte_r & ~mask) | bits;
739 		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
740 			r = hpte_make_readonly(r);
741 		/* If the PTE is changing, invalidate it first */
742 		if (r != pte_r) {
743 			rb = compute_tlbie_rb(v, r, pte_index);
744 			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
745 					      HPTE_V_ABSENT);
746 			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
747 			/* Don't lose R/C bit updates done by hardware */
748 			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
749 			hpte[1] = cpu_to_be64(r);
750 		}
751 	}
752 	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
753 	asm volatile("ptesync" : : : "memory");
754 	if (is_mmio_hpte(v, pte_r))
755 		atomic64_inc(&kvm->arch.mmio_update);
756 
757 	return H_SUCCESS;
758 }
759 
760 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
761 		   unsigned long pte_index)
762 {
763 	struct kvm *kvm = vcpu->kvm;
764 	__be64 *hpte;
765 	unsigned long v, r;
766 	int i, n = 1;
767 	struct revmap_entry *rev = NULL;
768 
769 	if (kvm_is_radix(kvm))
770 		return H_FUNCTION;
771 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
772 		return H_PARAMETER;
773 	if (flags & H_READ_4) {
774 		pte_index &= ~3;
775 		n = 4;
776 	}
777 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
778 	for (i = 0; i < n; ++i, ++pte_index) {
779 		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
780 		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
781 		r = be64_to_cpu(hpte[1]);
782 		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
783 			v = hpte_new_to_old_v(v, r);
784 			r = hpte_new_to_old_r(r);
785 		}
786 		if (v & HPTE_V_ABSENT) {
787 			v &= ~HPTE_V_ABSENT;
788 			v |= HPTE_V_VALID;
789 		}
790 		if (v & HPTE_V_VALID) {
791 			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
792 			r &= ~HPTE_GR_RESERVED;
793 		}
794 		vcpu->arch.gpr[4 + i * 2] = v;
795 		vcpu->arch.gpr[5 + i * 2] = r;
796 	}
797 	return H_SUCCESS;
798 }
799 
800 long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
801 			unsigned long pte_index)
802 {
803 	struct kvm *kvm = vcpu->kvm;
804 	__be64 *hpte;
805 	unsigned long v, r, gr;
806 	struct revmap_entry *rev;
807 	unsigned long *rmap;
808 	long ret = H_NOT_FOUND;
809 
810 	if (kvm_is_radix(kvm))
811 		return H_FUNCTION;
812 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
813 		return H_PARAMETER;
814 
815 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
816 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
817 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
818 		cpu_relax();
819 	v = be64_to_cpu(hpte[0]);
820 	r = be64_to_cpu(hpte[1]);
821 	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
822 		goto out;
823 
824 	gr = rev->guest_rpte;
825 	if (rev->guest_rpte & HPTE_R_R) {
826 		rev->guest_rpte &= ~HPTE_R_R;
827 		note_hpte_modification(kvm, rev);
828 	}
829 	if (v & HPTE_V_VALID) {
830 		gr |= r & (HPTE_R_R | HPTE_R_C);
831 		if (r & HPTE_R_R) {
832 			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
833 			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
834 			if (rmap) {
835 				lock_rmap(rmap);
836 				*rmap |= KVMPPC_RMAP_REFERENCED;
837 				unlock_rmap(rmap);
838 			}
839 		}
840 	}
841 	vcpu->arch.gpr[4] = gr;
842 	ret = H_SUCCESS;
843  out:
844 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
845 	return ret;
846 }
847 
848 long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
849 			unsigned long pte_index)
850 {
851 	struct kvm *kvm = vcpu->kvm;
852 	__be64 *hpte;
853 	unsigned long v, r, gr;
854 	struct revmap_entry *rev;
855 	long ret = H_NOT_FOUND;
856 
857 	if (kvm_is_radix(kvm))
858 		return H_FUNCTION;
859 	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
860 		return H_PARAMETER;
861 
862 	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
863 	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
864 	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
865 		cpu_relax();
866 	v = be64_to_cpu(hpte[0]);
867 	r = be64_to_cpu(hpte[1]);
868 	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
869 		goto out;
870 
871 	gr = rev->guest_rpte;
872 	if (gr & HPTE_R_C) {
873 		rev->guest_rpte &= ~HPTE_R_C;
874 		note_hpte_modification(kvm, rev);
875 	}
876 	if (v & HPTE_V_VALID) {
877 		/* need to make it temporarily absent so C is stable */
878 		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
879 		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
880 		r = be64_to_cpu(hpte[1]);
881 		gr |= r & (HPTE_R_R | HPTE_R_C);
882 		if (r & HPTE_R_C) {
883 			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
884 			eieio();
885 			kvmppc_set_dirty_from_hpte(kvm, v, gr);
886 		}
887 	}
888 	vcpu->arch.gpr[4] = gr;
889 	ret = H_SUCCESS;
890  out:
891 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
892 	return ret;
893 }
894 
895 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
896 			unsigned long pte_index)
897 {
898 	unsigned long rb;
899 	u64 hp0, hp1;
900 
901 	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
902 	hp0 = be64_to_cpu(hptep[0]);
903 	hp1 = be64_to_cpu(hptep[1]);
904 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
905 		hp0 = hpte_new_to_old_v(hp0, hp1);
906 		hp1 = hpte_new_to_old_r(hp1);
907 	}
908 	rb = compute_tlbie_rb(hp0, hp1, pte_index);
909 	do_tlbies(kvm, &rb, 1, 1, true);
910 }
911 EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
912 
913 void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
914 			   unsigned long pte_index)
915 {
916 	unsigned long rb;
917 	unsigned char rbyte;
918 	u64 hp0, hp1;
919 
920 	hp0 = be64_to_cpu(hptep[0]);
921 	hp1 = be64_to_cpu(hptep[1]);
922 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
923 		hp0 = hpte_new_to_old_v(hp0, hp1);
924 		hp1 = hpte_new_to_old_r(hp1);
925 	}
926 	rb = compute_tlbie_rb(hp0, hp1, pte_index);
927 	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
928 	/* modify only the second-last byte, which contains the ref bit */
929 	*((char *)hptep + 14) = rbyte;
930 	do_tlbies(kvm, &rb, 1, 1, false);
931 }
932 EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
933 
934 static int slb_base_page_shift[4] = {
935 	24,	/* 16M */
936 	16,	/* 64k */
937 	34,	/* 16G */
938 	20,	/* 1M, unsupported */
939 };
940 
941 static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
942 		unsigned long eaddr, unsigned long slb_v, long mmio_update)
943 {
944 	struct mmio_hpte_cache_entry *entry = NULL;
945 	unsigned int pshift;
946 	unsigned int i;
947 
948 	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
949 		entry = &vcpu->arch.mmio_cache.entry[i];
950 		if (entry->mmio_update == mmio_update) {
951 			pshift = entry->slb_base_pshift;
952 			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
953 			    entry->slb_v == slb_v)
954 				return entry;
955 		}
956 	}
957 	return NULL;
958 }
959 
960 static struct mmio_hpte_cache_entry *
961 			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
962 {
963 	unsigned int index = vcpu->arch.mmio_cache.index;
964 
965 	vcpu->arch.mmio_cache.index++;
966 	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
967 		vcpu->arch.mmio_cache.index = 0;
968 
969 	return &vcpu->arch.mmio_cache.entry[index];
970 }
971 
972 /* When called from virtmode, this func should be protected by
973  * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
974  * can trigger deadlock issue.
975  */
976 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
977 			      unsigned long valid)
978 {
979 	unsigned int i;
980 	unsigned int pshift;
981 	unsigned long somask;
982 	unsigned long vsid, hash;
983 	unsigned long avpn;
984 	__be64 *hpte;
985 	unsigned long mask, val;
986 	unsigned long v, r, orig_v;
987 
988 	/* Get page shift, work out hash and AVPN etc. */
989 	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
990 	val = 0;
991 	pshift = 12;
992 	if (slb_v & SLB_VSID_L) {
993 		mask |= HPTE_V_LARGE;
994 		val |= HPTE_V_LARGE;
995 		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
996 	}
997 	if (slb_v & SLB_VSID_B_1T) {
998 		somask = (1UL << 40) - 1;
999 		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
1000 		vsid ^= vsid << 25;
1001 	} else {
1002 		somask = (1UL << 28) - 1;
1003 		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
1004 	}
1005 	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
1006 	avpn = slb_v & ~(somask >> 16);	/* also includes B */
1007 	avpn |= (eaddr & somask) >> 16;
1008 
1009 	if (pshift >= 24)
1010 		avpn &= ~((1UL << (pshift - 16)) - 1);
1011 	else
1012 		avpn &= ~0x7fUL;
1013 	val |= avpn;
1014 
1015 	for (;;) {
1016 		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
1017 
1018 		for (i = 0; i < 16; i += 2) {
1019 			/* Read the PTE racily */
1020 			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
1021 			if (cpu_has_feature(CPU_FTR_ARCH_300))
1022 				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));
1023 
1024 			/* Check valid/absent, hash, segment size and AVPN */
1025 			if (!(v & valid) || (v & mask) != val)
1026 				continue;
1027 
1028 			/* Lock the PTE and read it under the lock */
1029 			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
1030 				cpu_relax();
1031 			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
1032 			r = be64_to_cpu(hpte[i+1]);
1033 			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1034 				v = hpte_new_to_old_v(v, r);
1035 				r = hpte_new_to_old_r(r);
1036 			}
1037 
1038 			/*
1039 			 * Check the HPTE again, including base page size
1040 			 */
1041 			if ((v & valid) && (v & mask) == val &&
1042 			    kvmppc_hpte_base_page_shift(v, r) == pshift)
1043 				/* Return with the HPTE still locked */
1044 				return (hash << 3) + (i >> 1);
1045 
1046 			__unlock_hpte(&hpte[i], orig_v);
1047 		}
1048 
1049 		if (val & HPTE_V_SECONDARY)
1050 			break;
1051 		val |= HPTE_V_SECONDARY;
1052 		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
1053 	}
1054 	return -1;
1055 }
1056 EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
1057 
1058 /*
1059  * Called in real mode to check whether an HPTE not found fault
1060  * is due to accessing a paged-out page or an emulated MMIO page,
1061  * or if a protection fault is due to accessing a page that the
1062  * guest wanted read/write access to but which we made read-only.
1063  * Returns a possibly modified status (DSISR) value if not
1064  * (i.e. pass the interrupt to the guest),
1065  * -1 to pass the fault up to host kernel mode code, -2 to do that
1066  * and also load the instruction word (for MMIO emulation),
1067  * or 0 if we should make the guest retry the access.
1068  */
1069 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
1070 			  unsigned long slb_v, unsigned int status, bool data)
1071 {
1072 	struct kvm *kvm = vcpu->kvm;
1073 	long int index;
1074 	unsigned long v, r, gr, orig_v;
1075 	__be64 *hpte;
1076 	unsigned long valid;
1077 	struct revmap_entry *rev;
1078 	unsigned long pp, key;
1079 	struct mmio_hpte_cache_entry *cache_entry = NULL;
1080 	long mmio_update = 0;
1081 
1082 	/* For protection fault, expect to find a valid HPTE */
1083 	valid = HPTE_V_VALID;
1084 	if (status & DSISR_NOHPTE) {
1085 		valid |= HPTE_V_ABSENT;
1086 		mmio_update = atomic64_read(&kvm->arch.mmio_update);
1087 		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
1088 	}
1089 	if (cache_entry) {
1090 		index = cache_entry->pte_index;
1091 		v = cache_entry->hpte_v;
1092 		r = cache_entry->hpte_r;
1093 		gr = cache_entry->rpte;
1094 	} else {
1095 		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
1096 		if (index < 0) {
1097 			if (status & DSISR_NOHPTE)
1098 				return status;	/* there really was no HPTE */
1099 			return 0;	/* for prot fault, HPTE disappeared */
1100 		}
1101 		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
1102 		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
1103 		r = be64_to_cpu(hpte[1]);
1104 		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1105 			v = hpte_new_to_old_v(v, r);
1106 			r = hpte_new_to_old_r(r);
1107 		}
1108 		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
1109 		gr = rev->guest_rpte;
1110 
1111 		unlock_hpte(hpte, orig_v);
1112 	}
1113 
1114 	/* For not found, if the HPTE is valid by now, retry the instruction */
1115 	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
1116 		return 0;
1117 
1118 	/* Check access permissions to the page */
1119 	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
1120 	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
1121 	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
1122 	if (!data) {
1123 		if (gr & (HPTE_R_N | HPTE_R_G))
1124 			return status | SRR1_ISI_N_OR_G;
1125 		if (!hpte_read_permission(pp, slb_v & key))
1126 			return status | SRR1_ISI_PROT;
1127 	} else if (status & DSISR_ISSTORE) {
1128 		/* check write permission */
1129 		if (!hpte_write_permission(pp, slb_v & key))
1130 			return status | DSISR_PROTFAULT;
1131 	} else {
1132 		if (!hpte_read_permission(pp, slb_v & key))
1133 			return status | DSISR_PROTFAULT;
1134 	}
1135 
1136 	/* Check storage key, if applicable */
1137 	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
1138 		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
1139 		if (status & DSISR_ISSTORE)
1140 			perm >>= 1;
1141 		if (perm & 1)
1142 			return status | DSISR_KEYFAULT;
1143 	}
1144 
1145 	/* Save HPTE info for virtual-mode handler */
1146 	vcpu->arch.pgfault_addr = addr;
1147 	vcpu->arch.pgfault_index = index;
1148 	vcpu->arch.pgfault_hpte[0] = v;
1149 	vcpu->arch.pgfault_hpte[1] = r;
1150 	vcpu->arch.pgfault_cache = cache_entry;
1151 
1152 	/* Check the storage key to see if it is possibly emulated MMIO */
1153 	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
1154 	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
1155 		if (!cache_entry) {
1156 			unsigned int pshift = 12;
1157 			unsigned int pshift_index;
1158 
1159 			if (slb_v & SLB_VSID_L) {
1160 				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
1161 				pshift = slb_base_page_shift[pshift_index];
1162 			}
1163 			cache_entry = next_mmio_cache_entry(vcpu);
1164 			cache_entry->eaddr = addr;
1165 			cache_entry->slb_base_pshift = pshift;
1166 			cache_entry->pte_index = index;
1167 			cache_entry->hpte_v = v;
1168 			cache_entry->hpte_r = r;
1169 			cache_entry->rpte = gr;
1170 			cache_entry->slb_v = slb_v;
1171 			cache_entry->mmio_update = mmio_update;
1172 		}
1173 		if (data && (vcpu->arch.shregs.msr & MSR_IR))
1174 			return -2;	/* MMIO emulation - load instr word */
1175 	}
1176 
1177 	return -1;		/* send fault up to host kernel mode */
1178 }
1179