/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* The Power architecture requires that the HPT be at least 256 kB */
#define PPC_MIN_HPT_ORDER	18

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);
static void kvmppc_rmap_reset(struct kvm *kvm);

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a size different from the default,
	 * first try to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
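	/*
	 * Illustration: at the minimum order of 18 the HPT is 2^18 = 256 kB,
	 * holding 2^14 = 16384 16-byte HPTEs grouped into 2^11 = 2048
	 * 128-byte HPTEGs, so hpt_mask is 0x7ff.
	 */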

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
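
/*
 * Note: for 4 kB pages neither helper sets any bits; for 64 kB and 16 MB
 * pages HPTE_V_LARGE is set in the first doubleword, and only the 64 kB
 * case additionally needs the 0x1000 LP encoding in the second doubleword.
 */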

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
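		/*
		 * This is the usual 1TB-segment hash, vsid ^ (vsid << 25) ^
		 * page index, with the fixed VRMA_VSID folded in and the
		 * result reduced to an HPTEG index.
		 */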
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
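	/*
	 * Each slot_phys[] entry packs the real address of the page in its
	 * high bits with KVMPPC_GOT_PAGE, the cache (I/O) bits and the page
	 * order in its low-order bits, as filled in below.
	 */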
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
					  pteh, ptel, &vcpu->arch.gpr[4]);
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;
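	/*
	 * The storage key is the SLB entry's Kp bit in problem state or its
	 * Ks bit in privileged state; hpte_read_permission() and
	 * hpte_write_permission() combine it with the PP bits below.
	 */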

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

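	/*
	 * For D-form loads/stores the 0x10000000 bit of the primary opcode
	 * separates stores from loads (e.g. lwz is opcode 32, stw is 36);
	 * for X-form instructions under major opcode 31 the corresponding
	 * bit sits in the extended opcode (e.g. lwzx vs. stwx), hence 0x100.
	 */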
	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
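	/*
	 * mmu_seq is sampled before the page lookup below and rechecked via
	 * mmu_notifier_retry() once the rmap lock is held, so a concurrent
	 * invalidation makes us drop the new HPTE and let the guest retry.
	 */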

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
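	/*
	 * Note: (HPTE_R_PP0 - pte_size) above masks exactly the old real
	 * page number bits, from the page-size bit up to (but not including)
	 * the top PP0 bit, so the low attribute bits survive while the new
	 * pfn is ORed in.
	 */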
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page, because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup().
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm->memslots;
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			if (kvm->arch.using_mmu_notifiers)
				hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			if (rcbits & ~rev[i].guest_rpte) {
				rev[i].guest_rpte = ptel | rcbits;
				note_hpte_modification(kvm, &rev[i]);
			}
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
	unsigned long n;

	rmapp = memslot->arch.rmap;
	gfn = memslot->base_gfn;
	for (n = memslot->npages; n; --n) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, rmapp, gfn);
		++rmapp;
		++gfn;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i;
	unsigned long *rmapp;
	struct kvm_vcpu *vcpu;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp) && map)
			__set_bit_le(i, map);
		++rmapp;
	}

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
		harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty || !kvm->arch.using_mmu_notifiers)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
		lock_rmap(rmap);
		*rmap |= KVMPPC_RMAP_CHANGED;
		unlock_rmap(rmap);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

struct kvm_htab_ctx {
	unsigned long	index;
	unsigned long	flags;
	struct kvm	*kvm;
	int		first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))
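
/*
 * The stream format assumed by the loops below: each kvm_get_htab_header
 * (index, n_valid, n_invalid) is followed by n_valid pairs of HPTE
 * doublewords; the n_invalid entries that follow those are not written out.
 */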

/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, unsigned long *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(hptp[0] & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = hptp[0];

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hptp[1])) {
			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
				HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hptp[0] &= ~HPTE_V_HVLOCK;
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = v;
	hpte[1] = r;
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
	revp = kvm->arch.revmap + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvm->arch.hpt_npte &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvm->arch.hpt_npte) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	unsigned long *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int rma_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	rma_setup = kvm->arch.rma_setup_done;
	if (rma_setup) {
		kvm->arch.rma_setup_done = 0;	/* temporarily */
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvm->arch.hpt_npte ||
		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
			break;

		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			err = -EFAULT;
			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
				goto out;
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!rma_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
				lpcr |= senc << (LPCR_VRMASD_SH - 4);
				kvm->arch.lpcr = lpcr;
				rma_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = rma_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
	if (ret < 0) {
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}