/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

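/*
 * Look up the Linux PTE for a host virtual address and let
 * kvmppc_read_update_linux_pte() update its accessed/dirty bits.
 * On entry *pte_sizep is the minimum page size the caller needs;
 * on return it is the actual size of the backing page.  Returns a
 * zero PTE if there is no usable mapping or the page is too small.
 */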
static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int hugepage_shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hugepage_shift);
	if (!ptep)
		return __pte(0);
	if (hugepage_shift)
		*pte_sizep = 1ul << hugepage_shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
}

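/*
 * Write a new first dword into an HPTE, with a release barrier so
 * that earlier updates are visible first; callers pass an hpte_v
 * value with HPTE_V_HVLOCK clear, which unlocks the entry.
 */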
static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}

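/*
 * Worker for the H_ENTER hypercall, callable in real or virtual mode:
 * validate the guest's proposed HPTE, translate the guest physical
 * address to a host real address, find and lock a free slot in the
 * HPT group, hook the entry into the reverse-map chain, and finally
 * write the HPTE as valid (or absent, for emulated MMIO and pages
 * that are not yet resident).  On success the chosen slot is
 * returned through *pte_idx_ret.
 */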
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
		pa |= gpa & ~PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
						  &pte_size);
		if (pte_present(pte) && !pte_numa(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;

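	/* Replace the guest's RPN field with the host real address */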
	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

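/*
 * Per-CPU token stored in a lock word to mark it as taken; read from
 * the PACA, using whichever field yields the right 32-bit value for
 * the kernel's endianness.
 */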
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

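/*
 * Try once to take the global tlbie lock; returns 1 on success, 0 if
 * another CPU already holds it.  The lwarx/stwcx. loop retries only
 * on lost reservations, and the isync keeps the critical section
 * from starting before the lock is visibly held.
 */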
static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "  stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "  isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

/*
 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 * processors such as POWER7; the large page bit is in the instruction
 * not RB, and the top 16 bits and the bottom 12 bits of the VA
 * in RB must be 0.
 */
static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
			  long npages, int global, bool need_sync)
{
	long i;

	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbie %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbie %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			unsigned long rb = rbvalues[i];

			if (rb & 1)		/* large page */
				asm volatile("tlbiel %0,1" : :
					     "r" (rb & 0x0000fffffffff000ul));
			else
				asm volatile("tlbiel %0,0" : :
					     "r" (rb & 0x0000fffffffff000ul));
		}
		asm volatile("ptesync" : : : "memory");
	}
}

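/*
 * Invalidate the TLB entries described by rbvalues[0..npages-1],
 * either on all cores (tlbie, serialized by kvm->arch.tlbie_lock)
 * or on the local core only (tlbiel).
 */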
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970 tlbie instruction is a bit different */
		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
		return;
	}
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

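/*
 * Worker for H_REMOVE: lock and match the HPTE, clear its valid bit,
 * flush the old translation, unhook the entry from the reverse-map
 * chain, and return the old HPTE dwords through hpret[].
 */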
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

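/*
 * H_BULK_REMOVE: process up to four remove requests passed in
 * GPRs 4-11, two doublewords per request, batching the TLB
 * invalidations.  The request type and flags live in the top byte
 * of each first doubleword, and the per-request return code is
 * written back into that byte.
 */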
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	global = global_invalidates(kvm, 0);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

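/*
 * H_PROTECT: update the protection-related bits (pp0, pp, n and the
 * storage keys) of an existing HPTE, flushing the old translation
 * and re-reducing permissions if the host has the page read-only.
 */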
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte_and_update(pgdir, hva,
								  1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

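/*
 * H_READ: return one HPTE (or four, if H_READ_4 is set) in GPRs 4
 * and up, presenting absent entries as valid and substituting the
 * guest's view of the second doubleword.
 */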
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

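/*
 * Clear the valid bit of an HPTE and flush the old translation from
 * all cores; the entry is assumed to be locked by the caller.
 */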
void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

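/*
 * Clear the reference (R) bit of an HPTE and flush the old
 * translation so that subsequent accesses will set R again.
 */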
void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

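/* Base page shift for each SLB_VSID_LP encoding when SLB_VSID_L is set */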
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/*
 * When called from virtmode, this function must be protected by
 * preempt_disable(); otherwise the caller could be preempted while
 * holding HPTE_V_HVLOCK, deadlocking against anyone else spinning
 * on that lock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size.
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * genuine (i.e. the interrupt should be passed to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}