1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16  */
17 
18 #include <linux/types.h>
19 #include <linux/string.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/highmem.h>
23 #include <linux/gfp.h>
24 #include <linux/slab.h>
25 #include <linux/hugetlb.h>
26 #include <linux/vmalloc.h>
27 #include <linux/srcu.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/debugfs.h>
31 
32 #include <asm/tlbflush.h>
33 #include <asm/kvm_ppc.h>
34 #include <asm/kvm_book3s.h>
35 #include <asm/book3s/64/mmu-hash.h>
36 #include <asm/hvcall.h>
37 #include <asm/synch.h>
38 #include <asm/ppc-opcode.h>
39 #include <asm/cputable.h>
40 #include <asm/pte-walk.h>
41 
42 #include "trace_hv.h"
43 
44 //#define DEBUG_RESIZE_HPT	1
45 
46 #ifdef DEBUG_RESIZE_HPT
47 #define resize_hpt_debug(resize, ...)				\
48 	do {							\
49 		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
50 		printk(__VA_ARGS__);				\
51 	} while (0)
52 #else
53 #define resize_hpt_debug(resize, ...)				\
54 	do { } while (0)
55 #endif
56 
57 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
58 				long pte_index, unsigned long pteh,
59 				unsigned long ptel, unsigned long *pte_idx_ret);
60 
61 struct kvm_resize_hpt {
62 	/* These fields are read-only after init */
63 	struct kvm *kvm;
64 	struct work_struct work;
65 	u32 order;
66 
67 	/* These fields protected by kvm->lock */
68 
69 	/* Possible values and their usage:
70 	 *  <0     an error occurred during allocation,
71 	 *  -EBUSY allocation is in progress,
72 	 *  0      allocation made successfully.
73 	 */
74 	int error;
75 
76 	/* Private to the work thread, until error != -EBUSY,
77 	 * then protected by kvm->lock.
78 	 */
79 	struct kvm_hpt_info hpt;
80 };
81 
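/*
 * Allocate a hashed page table of 2**order bytes, preferring the CMA
 * reserve and falling back to the normal page allocator, together with
 * a reverse-map (revmap) array with one entry per HPTE.
 */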
82 int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
83 {
84 	unsigned long hpt = 0;
85 	int cma = 0;
86 	struct page *page = NULL;
87 	struct revmap_entry *rev;
88 	unsigned long npte;
89 
90 	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
91 		return -EINVAL;
92 
93 	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
94 	if (page) {
95 		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
96 		memset((void *)hpt, 0, (1ul << order));
97 		cma = 1;
98 	}
99 
100 	if (!hpt)
101 		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
102 				       |__GFP_NOWARN, order - PAGE_SHIFT);
103 
104 	if (!hpt)
105 		return -ENOMEM;
106 
107 	/* HPTEs are 2**4 bytes long */
108 	npte = 1ul << (order - 4);
109 
110 	/* Allocate reverse map array */
111 	rev = vmalloc(sizeof(struct revmap_entry) * npte);
112 	if (!rev) {
113 		if (cma)
114 			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
115 		else
116 			free_pages(hpt, order - PAGE_SHIFT);
117 		return -ENOMEM;
118 	}
119 
120 	info->order = order;
121 	info->virt = hpt;
122 	info->cma = cma;
123 	info->rev = rev;
124 
125 	return 0;
126 }
127 
128 void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
129 {
130 	atomic64_set(&kvm->arch.mmio_update, 0);
131 	kvm->arch.hpt = *info;
132 	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
133 
134 	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
135 		 info->virt, (long)info->order, kvm->arch.lpid);
136 }
137 
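/*
 * Ensure the guest has an HPT of the given order: reuse and clear the
 * existing HPT if it is already the right size, otherwise free it and
 * allocate a new one.  Fails with -EBUSY if any vcpus are running.
 */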
138 long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
139 {
140 	long err = -EBUSY;
141 	struct kvm_hpt_info info;
142 
143 	mutex_lock(&kvm->lock);
144 	if (kvm->arch.mmu_ready) {
145 		kvm->arch.mmu_ready = 0;
146 		/* order mmu_ready vs. vcpus_running */
147 		smp_mb();
148 		if (atomic_read(&kvm->arch.vcpus_running)) {
149 			kvm->arch.mmu_ready = 1;
150 			goto out;
151 		}
152 	}
153 	if (kvm_is_radix(kvm)) {
154 		err = kvmppc_switch_mmu_to_hpt(kvm);
155 		if (err)
156 			goto out;
157 	}
158 
159 	if (kvm->arch.hpt.order == order) {
160 		/* We already have a suitable HPT */
161 
162 		/* Set the entire HPT to 0, i.e. invalid HPTEs */
163 		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
164 		/*
165 		 * Reset all the reverse-mapping chains for all memslots
166 		 */
167 		kvmppc_rmap_reset(kvm);
168 		err = 0;
169 		goto out;
170 	}
171 
172 	if (kvm->arch.hpt.virt) {
173 		kvmppc_free_hpt(&kvm->arch.hpt);
174 		kvmppc_rmap_reset(kvm);
175 	}
176 
177 	err = kvmppc_allocate_hpt(&info, order);
178 	if (err < 0)
179 		goto out;
180 	kvmppc_set_hpt(kvm, &info);
181 
182 out:
183 	if (err == 0)
184 		/* Ensure that each vcpu will flush its TLB on next entry. */
185 		cpumask_setall(&kvm->arch.need_tlb_flush);
186 
187 	mutex_unlock(&kvm->lock);
188 	return err;
189 }
190 
191 void kvmppc_free_hpt(struct kvm_hpt_info *info)
192 {
193 	vfree(info->rev);
194 	info->rev = NULL;
195 	if (info->cma)
196 		kvm_free_hpt_cma(virt_to_page(info->virt),
197 				 1 << (info->order - PAGE_SHIFT));
198 	else if (info->virt)
199 		free_pages(info->virt, info->order - PAGE_SHIFT);
200 	info->virt = 0;
201 	info->order = 0;
202 }
203 
204 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
205 static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
206 {
207 	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
208 }
209 
210 /* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
211 static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
212 {
213 	return (pgsize == 0x10000) ? 0x1000 : 0;
214 }
215 
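/*
 * Create bolted HPTEs mapping the VRMA (the virtual real mode area
 * backing guest real-mode accesses at effective address 0), using
 * pages of size 2**porder and at most one HPTE per HPTEG.
 */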
216 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
217 		     unsigned long porder)
218 {
219 	unsigned long i;
220 	unsigned long npages;
221 	unsigned long hp_v, hp_r;
222 	unsigned long addr, hash;
223 	unsigned long psize;
224 	unsigned long hp0, hp1;
225 	unsigned long idx_ret;
226 	long ret;
227 	struct kvm *kvm = vcpu->kvm;
228 
229 	psize = 1ul << porder;
230 	npages = memslot->npages >> (porder - PAGE_SHIFT);
231 
232 	/* VRMA can't be > 1TB */
233 	if (npages > 1ul << (40 - porder))
234 		npages = 1ul << (40 - porder);
235 	/* Can't use more than 1 HPTE per HPTEG */
236 	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
237 		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;
238 
239 	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
240 		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
241 	hp1 = hpte1_pgsize_encoding(psize) |
242 		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
243 
244 	for (i = 0; i < npages; ++i) {
245 		addr = i << porder;
246 		/* can't use hpt_hash since va > 64 bits */
247 		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
248 			& kvmppc_hpt_mask(&kvm->arch.hpt);
249 		/*
250 		 * We assume that the hash table is empty and no
251 		 * vcpus are using it at this stage.  Since we create
252 		 * at most one HPTE per HPTEG, we just assume entry 7
253 		 * is available and use it.
254 		 */
255 		hash = (hash << 3) + 7;
256 		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
257 		hp_r = hp1 | addr;
258 		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
259 						 &idx_ret);
260 		if (ret != H_SUCCESS) {
261 			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
262 			       addr, ret);
263 			break;
264 		}
265 	}
266 }
267 
268 int kvmppc_mmu_hv_init(void)
269 {
270 	unsigned long host_lpid, rsvd_lpid;
271 
272 	if (!cpu_has_feature(CPU_FTR_HVMODE))
273 		return -EINVAL;
274 
275 	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
276 	host_lpid = mfspr(SPRN_LPID);
277 	rsvd_lpid = LPID_RSVD;
278 
279 	kvmppc_init_lpid(rsvd_lpid + 1);
280 
281 	kvmppc_claim_lpid(host_lpid);
282 	/* rsvd_lpid is reserved for use in partition switching */
283 	kvmppc_claim_lpid(rsvd_lpid);
284 
285 	return 0;
286 }
287 
288 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
289 {
290 	unsigned long msr = vcpu->arch.intr_msr;
291 
292 	/* If transactional, change to suspend mode on IRQ delivery */
293 	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
294 		msr |= MSR_TS_S;
295 	else
296 		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
297 	kvmppc_set_msr(vcpu, msr);
298 }
299 
300 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
301 				long pte_index, unsigned long pteh,
302 				unsigned long ptel, unsigned long *pte_idx_ret)
303 {
304 	long ret;
305 
306 	/* Protect linux PTE lookup from page table destruction */
307 	rcu_read_lock_sched();	/* this disables preemption too */
308 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
309 				current->mm->pgd, false, pte_idx_ret);
310 	rcu_read_unlock_sched();
311 	if (ret == H_TOO_HARD) {
312 		/* this can't happen */
313 		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
314 		ret = H_RESOURCE;	/* or something */
315 	}
316 	return ret;
317 
318 }
319 
320 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
321 							 gva_t eaddr)
322 {
323 	u64 mask;
324 	int i;
325 
326 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
327 		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
328 			continue;
329 
330 		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
331 			mask = ESID_MASK_1T;
332 		else
333 			mask = ESID_MASK;
334 
335 		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
336 			return &vcpu->arch.slb[i];
337 	}
338 	return NULL;
339 }
340 
341 static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
342 			unsigned long ea)
343 {
344 	unsigned long ra_mask;
345 
346 	ra_mask = kvmppc_actual_pgsz(v, r) - 1;
347 	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
348 }
349 
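/*
 * Translate a guest effective address to a guest real address for the
 * HV MMU: look up the guest SLB (or use the VRMA SLB value for
 * real-mode accesses) and then the HPT, filling in *gpte with the
 * translation and the access permissions that apply.
 */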
350 static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
351 			struct kvmppc_pte *gpte, bool data, bool iswrite)
352 {
353 	struct kvm *kvm = vcpu->kvm;
354 	struct kvmppc_slb *slbe;
355 	unsigned long slb_v;
356 	unsigned long pp, key;
357 	unsigned long v, orig_v, gr;
358 	__be64 *hptep;
359 	int index;
360 	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
361 
362 	if (kvm_is_radix(vcpu->kvm))
363 		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
364 
365 	/* Get SLB entry */
366 	if (virtmode) {
367 		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
368 		if (!slbe)
369 			return -EINVAL;
370 		slb_v = slbe->origv;
371 	} else {
372 		/* real mode access */
373 		slb_v = vcpu->kvm->arch.vrma_slb_v;
374 	}
375 
376 	preempt_disable();
377 	/* Find the HPTE in the hash table */
378 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
379 					 HPTE_V_VALID | HPTE_V_ABSENT);
380 	if (index < 0) {
381 		preempt_enable();
382 		return -ENOENT;
383 	}
384 	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
385 	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
386 	if (cpu_has_feature(CPU_FTR_ARCH_300))
387 		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
388 	gr = kvm->arch.hpt.rev[index].guest_rpte;
389 
390 	unlock_hpte(hptep, orig_v);
391 	preempt_enable();
392 
393 	gpte->eaddr = eaddr;
394 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
395 
396 	/* Get PP bits and key for permission check */
397 	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
398 	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
399 	key &= slb_v;
400 
401 	/* Calculate permissions */
402 	gpte->may_read = hpte_read_permission(pp, key);
403 	gpte->may_write = hpte_write_permission(pp, key);
404 	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
405 
406 	/* Storage key permission check for POWER7 */
407 	if (data && virtmode) {
408 		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
409 		if (amrfield & 1)
410 			gpte->may_read = 0;
411 		if (amrfield & 2)
412 			gpte->may_write = 0;
413 	}
414 
415 	/* Get the guest physical address */
416 	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
417 	return 0;
418 }
419 
420 /*
421  * Quick test for whether an instruction is a load or a store.
422  * If the instruction is a load or a store, then this will indicate
423  * which it is, at least on server processors.  (Embedded processors
424  * have some external PID instructions that don't follow the rule
425  * embodied here.)  If the instruction isn't a load or store, then
426  * this doesn't return anything useful.
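 * For example, stw (major opcode 36, i.e. 0x90000000) has bit
 * 0x10000000 set while lwz (major opcode 32, 0x80000000) does not;
 * for the indexed forms under major opcode 31, stwx (XO 151) and
 * lwzx (XO 23) differ in bit 0x100.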
427  */
428 static int instruction_is_store(unsigned int instr)
429 {
430 	unsigned int mask;
431 
432 	mask = 0x10000000;
433 	if ((instr & 0xfc000000) == 0x7c000000)
434 		mask = 0x100;		/* major opcode 31 */
435 	return (instr & mask) != 0;
436 }
437 
438 int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
439 			   unsigned long gpa, gva_t ea, int is_store)
440 {
441 	u32 last_inst;
442 
443 	/*
444 	 * If we fail, we just return to the guest and try executing it again.
445 	 */
446 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
447 		EMULATE_DONE)
448 		return RESUME_GUEST;
449 
450 	/*
451 	 * WARNING: We do not know for sure whether the instruction we just
452 	 * read from memory is the same that caused the fault in the first
453 	 * place.  If the instruction we read is neither a load nor a store,
454 	 * then it can't access memory, so we don't need to worry about
455 	 * enforcing access permissions.  So, assuming it is a load or
456 	 * store, we just check that its direction (load or store) is
457 	 * consistent with the original fault, since that's what we
458 	 * checked the access permissions against.  If there is a mismatch
459 	 * we just return and retry the instruction.
460 	 */
461 
462 	if (instruction_is_store(last_inst) != !!is_store)
463 		return RESUME_GUEST;
464 
465 	/*
466 	 * MMIO accesses are emulated by looking up the translation in
467 	 * the hash table once, then performing the access later.  The
468 	 * translation could be invalidated in the meantime, at which
469 	 * point performing the subsequent memory access on the old
470 	 * physical address could possibly be a security hole for the
471 	 * guest (but not the host).
472 	 *
473 	 * This is less of an issue for MMIO stores since they aren't
474 	 * globally visible. It could be an issue for MMIO loads to
475 	 * a certain extent but we'll ignore it for now.
476 	 */
477 
478 	vcpu->arch.paddr_accessed = gpa;
479 	vcpu->arch.vaddr_accessed = ea;
480 	return kvmppc_emulate_mmio(run, vcpu);
481 }
482 
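/*
 * Handle a guest HPT page fault that the real-mode handler punted to
 * us: re-validate the HPTE found in real mode, translate and pin the
 * backing host page (or find the I/O mapping), and make the HPTE
 * valid, falling back to MMIO emulation if no memslot covers the
 * address.
 */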
483 int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
484 				unsigned long ea, unsigned long dsisr)
485 {
486 	struct kvm *kvm = vcpu->kvm;
487 	unsigned long hpte[3], r;
488 	unsigned long hnow_v, hnow_r;
489 	__be64 *hptep;
490 	unsigned long mmu_seq, psize, pte_size;
491 	unsigned long gpa_base, gfn_base;
492 	unsigned long gpa, gfn, hva, pfn;
493 	struct kvm_memory_slot *memslot;
494 	unsigned long *rmap;
495 	struct revmap_entry *rev;
496 	struct page *page, *pages[1];
497 	long index, ret, npages;
498 	bool is_ci;
499 	unsigned int writing, write_ok;
500 	struct vm_area_struct *vma;
501 	unsigned long rcbits;
502 	long mmio_update;
503 
504 	if (kvm_is_radix(kvm))
505 		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
506 
507 	/*
508 	 * Real-mode code has already searched the HPT and found the
509 	 * entry we're interested in.  Lock the entry and check that
510 	 * it hasn't changed.  If it has, just return and re-execute the
511 	 * instruction.
512 	 */
513 	if (ea != vcpu->arch.pgfault_addr)
514 		return RESUME_GUEST;
515 
516 	if (vcpu->arch.pgfault_cache) {
517 		mmio_update = atomic64_read(&kvm->arch.mmio_update);
518 		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
519 			r = vcpu->arch.pgfault_cache->rpte;
520 			psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
521 						   r);
522 			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
523 			gfn_base = gpa_base >> PAGE_SHIFT;
524 			gpa = gpa_base | (ea & (psize - 1));
525 			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
526 						dsisr & DSISR_ISSTORE);
527 		}
528 	}
529 	index = vcpu->arch.pgfault_index;
530 	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
531 	rev = &kvm->arch.hpt.rev[index];
532 	preempt_disable();
533 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
534 		cpu_relax();
535 	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
536 	hpte[1] = be64_to_cpu(hptep[1]);
537 	hpte[2] = r = rev->guest_rpte;
538 	unlock_hpte(hptep, hpte[0]);
539 	preempt_enable();
540 
541 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
542 		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
543 		hpte[1] = hpte_new_to_old_r(hpte[1]);
544 	}
545 	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
546 	    hpte[1] != vcpu->arch.pgfault_hpte[1])
547 		return RESUME_GUEST;
548 
549 	/* Translate the logical address and get the page */
550 	psize = kvmppc_actual_pgsz(hpte[0], r);
551 	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
552 	gfn_base = gpa_base >> PAGE_SHIFT;
553 	gpa = gpa_base | (ea & (psize - 1));
554 	gfn = gpa >> PAGE_SHIFT;
555 	memslot = gfn_to_memslot(kvm, gfn);
556 
557 	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
558 
559 	/* No memslot means it's an emulated MMIO region */
560 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
561 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
562 					      dsisr & DSISR_ISSTORE);
563 
564 	/*
565 	 * This should never happen, because of the slot_is_aligned()
566 	 * check in kvmppc_do_h_enter().
567 	 */
568 	if (gfn_base < memslot->base_gfn)
569 		return -EFAULT;
570 
571 	/* used to check for invalidations in progress */
572 	mmu_seq = kvm->mmu_notifier_seq;
573 	smp_rmb();
574 
575 	ret = -EFAULT;
576 	is_ci = false;
577 	pfn = 0;
578 	page = NULL;
579 	pte_size = PAGE_SIZE;
580 	writing = (dsisr & DSISR_ISSTORE) != 0;
581 	/* If writing != 0, then the HPTE must allow writing, if we get here */
582 	write_ok = writing;
583 	hva = gfn_to_hva_memslot(memslot, gfn);
584 	npages = get_user_pages_fast(hva, 1, writing, pages);
585 	if (npages < 1) {
586 		/* Check if it's an I/O mapping */
587 		down_read(&current->mm->mmap_sem);
588 		vma = find_vma(current->mm, hva);
589 		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
590 		    (vma->vm_flags & VM_PFNMAP)) {
591 			pfn = vma->vm_pgoff +
592 				((hva - vma->vm_start) >> PAGE_SHIFT);
593 			pte_size = psize;
594 			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
595 			write_ok = vma->vm_flags & VM_WRITE;
596 		}
597 		up_read(&current->mm->mmap_sem);
598 		if (!pfn)
599 			goto out_put;
600 	} else {
601 		page = pages[0];
602 		pfn = page_to_pfn(page);
603 		if (PageHuge(page)) {
604 			page = compound_head(page);
605 			pte_size <<= compound_order(page);
606 		}
607 		/* if the guest wants write access, see if that is OK */
608 		if (!writing && hpte_is_writable(r)) {
609 			pte_t *ptep, pte;
610 			unsigned long flags;
611 			/*
612 			 * We need to protect against page table destruction
613 			 * as well as hugepage split and collapse.
614 			 */
615 			local_irq_save(flags);
616 			ptep = find_current_mm_pte(current->mm->pgd,
617 						   hva, NULL, NULL);
618 			if (ptep) {
619 				pte = kvmppc_read_update_linux_pte(ptep, 1);
620 				if (__pte_write(pte))
621 					write_ok = 1;
622 			}
623 			local_irq_restore(flags);
624 		}
625 	}
626 
627 	if (psize > pte_size)
628 		goto out_put;
629 
630 	/* Check WIMG vs. the actual page we're accessing */
631 	if (!hpte_cache_flags_ok(r, is_ci)) {
632 		if (is_ci)
633 			goto out_put;
634 		/*
635 		 * Allow guest to map emulated device memory as
636 		 * uncacheable, but actually make it cacheable.
637 		 */
638 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
639 	}
640 
641 	/*
642 	 * Set the HPTE to point to pfn.
643 	 * Since the pfn is at PAGE_SIZE granularity, make sure we
644 	 * don't mask out lower-order bits if psize < PAGE_SIZE.
645 	 */
646 	if (psize < PAGE_SIZE)
647 		psize = PAGE_SIZE;
648 	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
649 					((pfn << PAGE_SHIFT) & ~(psize - 1));
650 	if (hpte_is_writable(r) && !write_ok)
651 		r = hpte_make_readonly(r);
652 	ret = RESUME_GUEST;
653 	preempt_disable();
654 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
655 		cpu_relax();
656 	hnow_v = be64_to_cpu(hptep[0]);
657 	hnow_r = be64_to_cpu(hptep[1]);
658 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
659 		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
660 		hnow_r = hpte_new_to_old_r(hnow_r);
661 	}
662 
663 	/*
664 	 * If the HPT is being resized, don't update the HPTE,
665 	 * instead let the guest retry after the resize operation is complete.
666 	 * The synchronization for mmu_ready test vs. set is provided
667 	 * by the HPTE lock.
668 	 */
669 	if (!kvm->arch.mmu_ready)
670 		goto out_unlock;
671 
672 	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
673 	    rev->guest_rpte != hpte[2])
674 		/* HPTE has been changed under us; let the guest retry */
675 		goto out_unlock;
676 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
677 
678 	/* Always put the HPTE in the rmap chain for the page base address */
679 	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
680 	lock_rmap(rmap);
681 
682 	/* Check if we might have been invalidated; let the guest retry if so */
683 	ret = RESUME_GUEST;
684 	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
685 		unlock_rmap(rmap);
686 		goto out_unlock;
687 	}
688 
689 	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
690 	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
691 	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
692 
693 	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
694 		/* HPTE was previously valid, so we need to invalidate it */
695 		unlock_rmap(rmap);
696 		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
697 		kvmppc_invalidate_hpte(kvm, hptep, index);
698 		/* don't lose previous R and C bits */
699 		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
700 	} else {
701 		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
702 	}
703 
704 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
705 		r = hpte_old_to_new_r(hpte[0], r);
706 		hpte[0] = hpte_old_to_new_v(hpte[0]);
707 	}
708 	hptep[1] = cpu_to_be64(r);
709 	eieio();
710 	__unlock_hpte(hptep, hpte[0]);
711 	asm volatile("ptesync" : : : "memory");
712 	preempt_enable();
713 	if (page && hpte_is_writable(r))
714 		SetPageDirty(page);
715 
716  out_put:
717 	trace_kvm_page_fault_exit(vcpu, hpte, ret);
718 
719 	if (page) {
720 		/*
721 		 * We drop pages[0] here, not page, because page might
722 		 * have been set to the head page of a compound, but
723 		 * we have to drop the reference on the correct tail
724 		 * page to match the get inside gup()
725 		 */
726 		put_page(pages[0]);
727 	}
728 	return ret;
729 
730  out_unlock:
731 	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
732 	preempt_enable();
733 	goto out_put;
734 }
735 
736 void kvmppc_rmap_reset(struct kvm *kvm)
737 {
738 	struct kvm_memslots *slots;
739 	struct kvm_memory_slot *memslot;
740 	int srcu_idx;
741 
742 	srcu_idx = srcu_read_lock(&kvm->srcu);
743 	slots = kvm_memslots(kvm);
744 	kvm_for_each_memslot(memslot, slots) {
745 		/*
746 		 * This assumes it is acceptable to lose reference and
747 		 * change bits across a reset.
748 		 */
749 		memset(memslot->arch.rmap, 0,
750 		       memslot->npages * sizeof(*memslot->arch.rmap));
751 	}
752 	srcu_read_unlock(&kvm->srcu, srcu_idx);
753 }
754 
755 typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
756 			      unsigned long gfn);
757 
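/*
 * Invoke the handler for every guest page in every memslot that
 * intersects the host virtual address range [start, end), OR-ing the
 * handlers' return values together.
 */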
758 static int kvm_handle_hva_range(struct kvm *kvm,
759 				unsigned long start,
760 				unsigned long end,
761 				hva_handler_fn handler)
762 {
763 	int ret;
764 	int retval = 0;
765 	struct kvm_memslots *slots;
766 	struct kvm_memory_slot *memslot;
767 
768 	slots = kvm_memslots(kvm);
769 	kvm_for_each_memslot(memslot, slots) {
770 		unsigned long hva_start, hva_end;
771 		gfn_t gfn, gfn_end;
772 
773 		hva_start = max(start, memslot->userspace_addr);
774 		hva_end = min(end, memslot->userspace_addr +
775 					(memslot->npages << PAGE_SHIFT));
776 		if (hva_start >= hva_end)
777 			continue;
778 		/*
779 		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
780 		 * {gfn, gfn+1, ..., gfn_end-1}.
781 		 */
782 		gfn = hva_to_gfn_memslot(hva_start, memslot);
783 		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
784 
785 		for (; gfn < gfn_end; ++gfn) {
786 			ret = handler(kvm, memslot, gfn);
787 			retval |= ret;
788 		}
789 	}
790 
791 	return retval;
792 }
793 
794 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
795 			  hva_handler_fn handler)
796 {
797 	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
798 }
799 
800 /* Must be called with both HPTE and rmap locked */
801 static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
802 			      struct kvm_memory_slot *memslot,
803 			      unsigned long *rmapp, unsigned long gfn)
804 {
805 	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
806 	struct revmap_entry *rev = kvm->arch.hpt.rev;
807 	unsigned long j, h;
808 	unsigned long ptel, psize, rcbits;
809 
810 	j = rev[i].forw;
811 	if (j == i) {
812 		/* chain is now empty */
813 		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
814 	} else {
815 		/* remove i from chain */
816 		h = rev[i].back;
817 		rev[h].forw = j;
818 		rev[j].back = h;
819 		rev[i].forw = rev[i].back = i;
820 		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
821 	}
822 
823 	/* Now check and modify the HPTE */
824 	ptel = rev[i].guest_rpte;
825 	psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel);
826 	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
827 	    hpte_rpn(ptel, psize) == gfn) {
828 		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
829 		kvmppc_invalidate_hpte(kvm, hptep, i);
830 		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
831 		/* Harvest R and C */
832 		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
833 		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
834 		if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
835 			kvmppc_update_dirty_map(memslot, gfn, psize);
836 		if (rcbits & ~rev[i].guest_rpte) {
837 			rev[i].guest_rpte = ptel | rcbits;
838 			note_hpte_modification(kvm, &rev[i]);
839 		}
840 	}
841 }
842 
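/*
 * Remove every HPTE mapping the given guest page, walking the rmap
 * chain one entry at a time and dropping the rmap lock before spinning
 * on a contended HPTE lock to avoid an ABBA deadlock.
 */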
843 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
844 			   unsigned long gfn)
845 {
846 	unsigned long i;
847 	__be64 *hptep;
848 	unsigned long *rmapp;
849 
850 	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
851 	for (;;) {
852 		lock_rmap(rmapp);
853 		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
854 			unlock_rmap(rmapp);
855 			break;
856 		}
857 
858 		/*
859 		 * To avoid an ABBA deadlock with the HPTE lock bit,
860 		 * we can't spin on the HPTE lock while holding the
861 		 * rmap chain lock.
862 		 */
863 		i = *rmapp & KVMPPC_RMAP_INDEX;
864 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
865 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
866 			/* unlock rmap before spinning on the HPTE lock */
867 			unlock_rmap(rmapp);
868 			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
869 				cpu_relax();
870 			continue;
871 		}
872 
873 		kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
874 		unlock_rmap(rmapp);
875 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
876 	}
877 	return 0;
878 }
879 
880 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
881 {
882 	hva_handler_fn handler;
883 
884 	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
885 	kvm_handle_hva_range(kvm, start, end, handler);
886 	return 0;
887 }
888 
889 void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
890 				  struct kvm_memory_slot *memslot)
891 {
892 	unsigned long gfn;
893 	unsigned long n;
894 	unsigned long *rmapp;
895 
896 	gfn = memslot->base_gfn;
897 	rmapp = memslot->arch.rmap;
898 	for (n = memslot->npages; n; --n, ++gfn) {
899 		if (kvm_is_radix(kvm)) {
900 			kvm_unmap_radix(kvm, memslot, gfn);
901 			continue;
902 		}
903 		/*
904 		 * Testing the present bit without locking is OK because
905 		 * the memslot has been marked invalid already, and hence
906 		 * no new HPTEs referencing this page can be created,
907 		 * thus the present bit can't go from 0 to 1.
908 		 */
909 		if (*rmapp & KVMPPC_RMAP_PRESENT)
910 			kvm_unmap_rmapp(kvm, memslot, gfn);
911 		++rmapp;
912 	}
913 }
914 
915 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
916 			 unsigned long gfn)
917 {
918 	struct revmap_entry *rev = kvm->arch.hpt.rev;
919 	unsigned long head, i, j;
920 	__be64 *hptep;
921 	int ret = 0;
922 	unsigned long *rmapp;
923 
924 	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
925  retry:
926 	lock_rmap(rmapp);
927 	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
928 		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
929 		ret = 1;
930 	}
931 	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
932 		unlock_rmap(rmapp);
933 		return ret;
934 	}
935 
936 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
937 	do {
938 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
939 		j = rev[i].forw;
940 
941 		/* If this HPTE isn't referenced, ignore it */
942 		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
943 			continue;
944 
945 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
946 			/* unlock rmap before spinning on the HPTE lock */
947 			unlock_rmap(rmapp);
948 			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
949 				cpu_relax();
950 			goto retry;
951 		}
952 
953 		/* Now check and modify the HPTE */
954 		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
955 		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
956 			kvmppc_clear_ref_hpte(kvm, hptep, i);
957 			if (!(rev[i].guest_rpte & HPTE_R_R)) {
958 				rev[i].guest_rpte |= HPTE_R_R;
959 				note_hpte_modification(kvm, &rev[i]);
960 			}
961 			ret = 1;
962 		}
963 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
964 	} while ((i = j) != head);
965 
966 	unlock_rmap(rmapp);
967 	return ret;
968 }
969 
970 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
971 {
972 	hva_handler_fn handler;
973 
974 	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
975 	return kvm_handle_hva_range(kvm, start, end, handler);
976 }
977 
978 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
979 			      unsigned long gfn)
980 {
981 	struct revmap_entry *rev = kvm->arch.hpt.rev;
982 	unsigned long head, i, j;
983 	unsigned long *hp;
984 	int ret = 1;
985 	unsigned long *rmapp;
986 
987 	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
988 	if (*rmapp & KVMPPC_RMAP_REFERENCED)
989 		return 1;
990 
991 	lock_rmap(rmapp);
992 	if (*rmapp & KVMPPC_RMAP_REFERENCED)
993 		goto out;
994 
995 	if (*rmapp & KVMPPC_RMAP_PRESENT) {
996 		i = head = *rmapp & KVMPPC_RMAP_INDEX;
997 		do {
998 			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
999 			j = rev[i].forw;
1000 			if (be64_to_cpu(hp[1]) & HPTE_R_R)
1001 				goto out;
1002 		} while ((i = j) != head);
1003 	}
1004 	ret = 0;
1005 
1006  out:
1007 	unlock_rmap(rmapp);
1008 	return ret;
1009 }
1010 
1011 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
1012 {
1013 	hva_handler_fn handler;
1014 
1015 	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
1016 	return kvm_handle_hva(kvm, hva, handler);
1017 }
1018 
1019 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
1020 {
1021 	hva_handler_fn handler;
1022 
1023 	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
1024 	kvm_handle_hva(kvm, hva, handler);
1025 }
1026 
1027 static int vcpus_running(struct kvm *kvm)
1028 {
1029 	return atomic_read(&kvm->arch.vcpus_running) != 0;
1030 }
1031 
1032 /*
1033  * Returns the number of system pages that are dirty.
1034  * This can be more than 1 if we find a huge-page HPTE.
1035  */
1036 static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
1037 {
1038 	struct revmap_entry *rev = kvm->arch.hpt.rev;
1039 	unsigned long head, i, j;
1040 	unsigned long n;
1041 	unsigned long v, r;
1042 	__be64 *hptep;
1043 	int npages_dirty = 0;
1044 
1045  retry:
1046 	lock_rmap(rmapp);
1047 	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
1048 		unlock_rmap(rmapp);
1049 		return npages_dirty;
1050 	}
1051 
1052 	i = head = *rmapp & KVMPPC_RMAP_INDEX;
1053 	do {
1054 		unsigned long hptep1;
1055 		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
1056 		j = rev[i].forw;
1057 
1058 		/*
1059 		 * Checking the C (changed) bit here is racy since there
1060 		 * is no guarantee about when the hardware writes it back.
1061 		 * If the HPTE is not writable then it is stable since the
1062 		 * page can't be written to, and we would have done a tlbie
1063 		 * (which forces the hardware to complete any writeback)
1064 		 * when making the HPTE read-only.
1065 		 * If vcpus are running then this call is racy anyway
1066 		 * since the page could get dirtied subsequently, so we
1067 		 * expect there to be a further call which would pick up
1068 		 * any delayed C bit writeback.
1069 		 * Otherwise we need to do the tlbie even if C==0 in
1070 		 * order to pick up any delayed writeback of C.
1071 		 */
1072 		hptep1 = be64_to_cpu(hptep[1]);
1073 		if (!(hptep1 & HPTE_R_C) &&
1074 		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
1075 			continue;
1076 
1077 		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
1078 			/* unlock rmap before spinning on the HPTE lock */
1079 			unlock_rmap(rmapp);
1080 			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
1081 				cpu_relax();
1082 			goto retry;
1083 		}
1084 
1085 		/* Now check and modify the HPTE */
1086 		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
1087 			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
1088 			continue;
1089 		}
1090 
1091 		/* need to make it temporarily absent so C is stable */
1092 		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
1093 		kvmppc_invalidate_hpte(kvm, hptep, i);
1094 		v = be64_to_cpu(hptep[0]);
1095 		r = be64_to_cpu(hptep[1]);
1096 		if (r & HPTE_R_C) {
1097 			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
1098 			if (!(rev[i].guest_rpte & HPTE_R_C)) {
1099 				rev[i].guest_rpte |= HPTE_R_C;
1100 				note_hpte_modification(kvm, &rev[i]);
1101 			}
1102 			n = kvmppc_actual_pgsz(v, r);
1103 			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104 			if (n > npages_dirty)
1105 				npages_dirty = n;
1106 			eieio();
1107 		}
1108 		v &= ~HPTE_V_ABSENT;
1109 		v |= HPTE_V_VALID;
1110 		__unlock_hpte(hptep, v);
1111 	} while ((i = j) != head);
1112 
1113 	unlock_rmap(rmapp);
1114 	return npages_dirty;
1115 }
1116 
1117 void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
1118 			      struct kvm_memory_slot *memslot,
1119 			      unsigned long *map)
1120 {
1121 	unsigned long gfn;
1122 
1123 	if (!vpa->dirty || !vpa->pinned_addr)
1124 		return;
1125 	gfn = vpa->gpa >> PAGE_SHIFT;
1126 	if (gfn < memslot->base_gfn ||
1127 	    gfn >= memslot->base_gfn + memslot->npages)
1128 		return;
1129 
1130 	vpa->dirty = false;
1131 	if (map)
1132 		__set_bit_le(gfn - memslot->base_gfn, map);
1133 }
1134 
1135 long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
1136 			struct kvm_memory_slot *memslot, unsigned long *map)
1137 {
1138 	unsigned long i;
1139 	unsigned long *rmapp;
1140 
1141 	preempt_disable();
1142 	rmapp = memslot->arch.rmap;
1143 	for (i = 0; i < memslot->npages; ++i) {
1144 		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
1145 		/*
1146 		 * Note that if npages > 0 then i must be a multiple of npages,
1147 		 * since we always put huge-page HPTEs in the rmap chain
1148 		 * corresponding to their page base address.
1149 		 */
1150 		if (npages)
1151 			set_dirty_bits(map, i, npages);
1152 		++rmapp;
1153 	}
1154 	preempt_enable();
1155 	return 0;
1156 }
1157 
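/*
 * Pin the host page backing guest physical address gpa and return a
 * kernel-virtual pointer to it; *nb_ret is set to the number of bytes
 * usable from the returned pointer before the end of the page.  The
 * caller must release the page with kvmppc_unpin_guest_page().
 */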
1158 void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
1159 			    unsigned long *nb_ret)
1160 {
1161 	struct kvm_memory_slot *memslot;
1162 	unsigned long gfn = gpa >> PAGE_SHIFT;
1163 	struct page *page, *pages[1];
1164 	int npages;
1165 	unsigned long hva, offset;
1166 	int srcu_idx;
1167 
1168 	srcu_idx = srcu_read_lock(&kvm->srcu);
1169 	memslot = gfn_to_memslot(kvm, gfn);
1170 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
1171 		goto err;
1172 	hva = gfn_to_hva_memslot(memslot, gfn);
1173 	npages = get_user_pages_fast(hva, 1, 1, pages);
1174 	if (npages < 1)
1175 		goto err;
1176 	page = pages[0];
1177 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1178 
1179 	offset = gpa & (PAGE_SIZE - 1);
1180 	if (nb_ret)
1181 		*nb_ret = PAGE_SIZE - offset;
1182 	return page_address(page) + offset;
1183 
1184  err:
1185 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1186 	return NULL;
1187 }
1188 
1189 void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
1190 			     bool dirty)
1191 {
1192 	struct page *page = virt_to_page(va);
1193 	struct kvm_memory_slot *memslot;
1194 	unsigned long gfn;
1195 	int srcu_idx;
1196 
1197 	put_page(page);
1198 
1199 	if (!dirty)
1200 		return;
1201 
1202 	/* We need to mark this page dirty in the memslot dirty_bitmap, if any */
1203 	gfn = gpa >> PAGE_SHIFT;
1204 	srcu_idx = srcu_read_lock(&kvm->srcu);
1205 	memslot = gfn_to_memslot(kvm, gfn);
1206 	if (memslot && memslot->dirty_bitmap)
1207 		set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
1208 	srcu_read_unlock(&kvm->srcu, srcu_idx);
1209 }
1210 
1211 /*
1212  * HPT resizing
1213  */
1214 static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
1215 {
1216 	int rc;
1217 
1218 	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
1219 	if (rc < 0)
1220 		return rc;
1221 
1222 	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
1223 			 resize->hpt.virt);
1224 
1225 	return 0;
1226 }
1227 
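/*
 * Unmap the HPTE at index idx in the current HPT and, if it is bolted,
 * re-insert it into the slot it hashes to in the pending (resized) HPT.
 * Returns 0 on success or if there was nothing to do, -ENOSPC on an
 * unresolvable collision between bolted entries, and -EIO if the entry
 * has a corrupt page size.
 */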
1228 static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
1229 					    unsigned long idx)
1230 {
1231 	struct kvm *kvm = resize->kvm;
1232 	struct kvm_hpt_info *old = &kvm->arch.hpt;
1233 	struct kvm_hpt_info *new = &resize->hpt;
1234 	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
1235 	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
1236 	__be64 *hptep, *new_hptep;
1237 	unsigned long vpte, rpte, guest_rpte;
1238 	int ret;
1239 	struct revmap_entry *rev;
1240 	unsigned long apsize, avpn, pteg, hash;
1241 	unsigned long new_idx, new_pteg, replace_vpte;
1242 	int pshift;
1243 
1244 	hptep = (__be64 *)(old->virt + (idx << 4));
1245 
1246 	/* Guest is stopped, so new HPTEs can't be added or faulted
1247 	 * in, only unmapped or altered by host actions.  So, it's
1248 	 * safe to check this before we take the HPTE lock */
1249 	vpte = be64_to_cpu(hptep[0]);
1250 	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
1251 		return 0; /* nothing to do */
1252 
1253 	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
1254 		cpu_relax();
1255 
1256 	vpte = be64_to_cpu(hptep[0]);
1257 
1258 	ret = 0;
1259 	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
1260 		/* Nothing to do */
1261 		goto out;
1262 
1263 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1264 		rpte = be64_to_cpu(hptep[1]);
1265 		vpte = hpte_new_to_old_v(vpte, rpte);
1266 	}
1267 
1268 	/* Unmap */
1269 	rev = &old->rev[idx];
1270 	guest_rpte = rev->guest_rpte;
1271 
1272 	ret = -EIO;
1273 	apsize = kvmppc_actual_pgsz(vpte, guest_rpte);
1274 	if (!apsize)
1275 		goto out;
1276 
1277 	if (vpte & HPTE_V_VALID) {
1278 		unsigned long gfn = hpte_rpn(guest_rpte, apsize);
1279 		int srcu_idx = srcu_read_lock(&kvm->srcu);
1280 		struct kvm_memory_slot *memslot =
1281 			__gfn_to_memslot(kvm_memslots(kvm), gfn);
1282 
1283 		if (memslot) {
1284 			unsigned long *rmapp;
1285 			rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1286 
1287 			lock_rmap(rmapp);
1288 			kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
1289 			unlock_rmap(rmapp);
1290 		}
1291 
1292 		srcu_read_unlock(&kvm->srcu, srcu_idx);
1293 	}
1294 
1295 	/* Reload PTE after unmap */
1296 	vpte = be64_to_cpu(hptep[0]);
1297 	BUG_ON(vpte & HPTE_V_VALID);
1298 	BUG_ON(!(vpte & HPTE_V_ABSENT));
1299 
1300 	ret = 0;
1301 	if (!(vpte & HPTE_V_BOLTED))
1302 		goto out;
1303 
1304 	rpte = be64_to_cpu(hptep[1]);
1305 
1306 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1307 		vpte = hpte_new_to_old_v(vpte, rpte);
1308 		rpte = hpte_new_to_old_r(rpte);
1309 	}
1310 
1311 	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
1312 	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
1313 	pteg = idx / HPTES_PER_GROUP;
1314 	if (vpte & HPTE_V_SECONDARY)
1315 		pteg = ~pteg;
1316 
1317 	if (!(vpte & HPTE_V_1TB_SEG)) {
1318 		unsigned long offset, vsid;
1319 
1320 		/* We only have 28 - 23 bits of offset in avpn */
1321 		offset = (avpn & 0x1f) << 23;
1322 		vsid = avpn >> 5;
1323 		/* We can find more bits from the pteg value */
1324 		if (pshift < 23)
1325 			offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
1326 
1327 		hash = vsid ^ (offset >> pshift);
1328 	} else {
1329 		unsigned long offset, vsid;
1330 
1331 		/* We only have 40 - 23 bits of seg_off in avpn */
1332 		offset = (avpn & 0x1ffff) << 23;
1333 		vsid = avpn >> 17;
1334 		if (pshift < 23)
1335 			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
1336 
1337 		hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
1338 	}
1339 
1340 	new_pteg = hash & new_hash_mask;
1341 	if (vpte & HPTE_V_SECONDARY)
1342 		new_pteg = ~hash & new_hash_mask;
1343 
1344 	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
1345 	new_hptep = (__be64 *)(new->virt + (new_idx << 4));
1346 
1347 	replace_vpte = be64_to_cpu(new_hptep[0]);
1348 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1349 		unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
1350 		replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
1351 	}
1352 
1353 	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1354 		BUG_ON(new->order >= old->order);
1355 
1356 		if (replace_vpte & HPTE_V_BOLTED) {
1357 			if (vpte & HPTE_V_BOLTED)
1358 				/* Bolted collision, nothing we can do */
1359 				ret = -ENOSPC;
1360 			/* Discard the new HPTE */
1361 			goto out;
1362 		}
1363 
1364 		/* Discard the previous HPTE */
1365 	}
1366 
1367 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1368 		rpte = hpte_old_to_new_r(vpte, rpte);
1369 		vpte = hpte_old_to_new_v(vpte);
1370 	}
1371 
1372 	new_hptep[1] = cpu_to_be64(rpte);
1373 	new->rev[new_idx].guest_rpte = guest_rpte;
1374 	/* No need for a barrier, since new HPT isn't active */
1375 	new_hptep[0] = cpu_to_be64(vpte);
1376 	unlock_hpte(new_hptep, vpte);
1377 
1378 out:
1379 	unlock_hpte(hptep, vpte);
1380 	return ret;
1381 }
1382 
1383 static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
1384 {
1385 	struct kvm *kvm = resize->kvm;
1386 	unsigned  long i;
1387 	int rc;
1388 
1389 	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
1390 		rc = resize_hpt_rehash_hpte(resize, i);
1391 		if (rc != 0)
1392 			return rc;
1393 	}
1394 
1395 	return 0;
1396 }
1397 
1398 static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
1399 {
1400 	struct kvm *kvm = resize->kvm;
1401 	struct kvm_hpt_info hpt_tmp;
1402 
1403 	/* Exchange the pending tables in the resize structure with
1404 	 * the active tables */
1405 
1406 	resize_hpt_debug(resize, "resize_hpt_pivot()\n");
1407 
1408 	spin_lock(&kvm->mmu_lock);
1409 	asm volatile("ptesync" : : : "memory");
1410 
1411 	hpt_tmp = kvm->arch.hpt;
1412 	kvmppc_set_hpt(kvm, &resize->hpt);
1413 	resize->hpt = hpt_tmp;
1414 
1415 	spin_unlock(&kvm->mmu_lock);
1416 
1417 	synchronize_srcu_expedited(&kvm->srcu);
1418 
1419 	if (cpu_has_feature(CPU_FTR_ARCH_300))
1420 		kvmppc_setup_partition_table(kvm);
1421 
1422 	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
1423 }
1424 
1425 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
1426 {
1427 	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
1428 		return;
1429 
1430 	if (!resize)
1431 		return;
1432 
1433 	if (resize->error != -EBUSY) {
1434 		if (resize->hpt.virt)
1435 			kvmppc_free_hpt(&resize->hpt);
1436 		kfree(resize);
1437 	}
1438 
1439 	if (kvm->arch.resize_hpt == resize)
1440 		kvm->arch.resize_hpt = NULL;
1441 }
1442 
1443 static void resize_hpt_prepare_work(struct work_struct *work)
1444 {
1445 	struct kvm_resize_hpt *resize = container_of(work,
1446 						     struct kvm_resize_hpt,
1447 						     work);
1448 	struct kvm *kvm = resize->kvm;
1449 	int err = 0;
1450 
1451 	if (WARN_ON(resize->error != -EBUSY))
1452 		return;
1453 
1454 	mutex_lock(&kvm->lock);
1455 
1456 	/* Request is still current? */
1457 	if (kvm->arch.resize_hpt == resize) {
1458 		/* The allocation we request here may be large and can
1459 		 * sleep for a while, so don't do it with kvm->lock held.
1460 		 */
1461 		mutex_unlock(&kvm->lock);
1462 
1463 		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
1464 				 resize->order);
1465 
1466 		err = resize_hpt_allocate(resize);
1467 
1468 		/* We rely on -EBUSY meaning "allocation still in progress"
1469 		 * here, so the allocation itself must never return it.
1470 		 */
1471 		if (WARN_ON(err == -EBUSY))
1472 			err = -EINPROGRESS;
1473 
1474 		mutex_lock(&kvm->lock);
1475 		/* It is possible that kvm->arch.resize_hpt != resize
1476 		 * after we grab kvm->lock again.
1477 		 */
1478 	}
1479 
1480 	resize->error = err;
1481 
1482 	if (kvm->arch.resize_hpt != resize)
1483 		resize_hpt_release(kvm, resize);
1484 
1485 	mutex_unlock(&kvm->lock);
1486 }
1487 
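/*
 * Userspace drives an HPT resize in two steps: the PREPARE ioctl below
 * starts (or polls) the background allocation of the new HPT, returning
 * a positive estimated completion time in milliseconds while it is
 * still in progress, and the COMMIT ioctl rehashes the bolted entries
 * into the new HPT and swaps it in once the allocation has finished.
 */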
1488 long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
1489 				     struct kvm_ppc_resize_hpt *rhpt)
1490 {
1491 	unsigned long flags = rhpt->flags;
1492 	unsigned long shift = rhpt->shift;
1493 	struct kvm_resize_hpt *resize;
1494 	int ret;
1495 
1496 	if (flags != 0 || kvm_is_radix(kvm))
1497 		return -EINVAL;
1498 
1499 	if (shift && ((shift < 18) || (shift > 46)))
1500 		return -EINVAL;
1501 
1502 	mutex_lock(&kvm->lock);
1503 
1504 	resize = kvm->arch.resize_hpt;
1505 
1506 	if (resize) {
1507 		if (resize->order == shift) {
1508 			/* Suitable resize in progress? */
1509 			ret = resize->error;
1510 			if (ret == -EBUSY)
1511 				ret = 100; /* estimated time in ms */
1512 			else if (ret)
1513 				resize_hpt_release(kvm, resize);
1514 
1515 			goto out;
1516 		}
1517 
1518 		/* not suitable, cancel it */
1519 		resize_hpt_release(kvm, resize);
1520 	}
1521 
1522 	ret = 0;
1523 	if (!shift)
1524 		goto out; /* nothing to do */
1525 
1526 	/* start new resize */
1527 
1528 	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1529 	if (!resize) {
1530 		ret = -ENOMEM;
1531 		goto out;
1532 	}
1533 
1534 	resize->error = -EBUSY;
1535 	resize->order = shift;
1536 	resize->kvm = kvm;
1537 	INIT_WORK(&resize->work, resize_hpt_prepare_work);
1538 	kvm->arch.resize_hpt = resize;
1539 
1540 	schedule_work(&resize->work);
1541 
1542 	ret = 100; /* estimated time in ms */
1543 
1544 out:
1545 	mutex_unlock(&kvm->lock);
1546 	return ret;
1547 }
1548 
1549 static void resize_hpt_boot_vcpu(void *opaque)
1550 {
1551 	/* Nothing to do, just force a KVM exit */
1552 }
1553 
1554 long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
1555 				    struct kvm_ppc_resize_hpt *rhpt)
1556 {
1557 	unsigned long flags = rhpt->flags;
1558 	unsigned long shift = rhpt->shift;
1559 	struct kvm_resize_hpt *resize;
1560 	long ret;
1561 
1562 	if (flags != 0 || kvm_is_radix(kvm))
1563 		return -EINVAL;
1564 
1565 	if (shift && ((shift < 18) || (shift > 46)))
1566 		return -EINVAL;
1567 
1568 	mutex_lock(&kvm->lock);
1569 
1570 	resize = kvm->arch.resize_hpt;
1571 
1572 	/* This shouldn't be possible */
1573 	ret = -EIO;
1574 	if (WARN_ON(!kvm->arch.mmu_ready))
1575 		goto out_no_hpt;
1576 
1577 	/* Stop VCPUs from running while we mess with the HPT */
1578 	kvm->arch.mmu_ready = 0;
1579 	smp_mb();
1580 
1581 	/* Boot all CPUs out of the guest so they re-read
1582 	 * mmu_ready */
1583 	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);
1584 
1585 	ret = -ENXIO;
1586 	if (!resize || (resize->order != shift))
1587 		goto out;
1588 
1589 	ret = resize->error;
1590 	if (ret)
1591 		goto out;
1592 
1593 	ret = resize_hpt_rehash(resize);
1594 	if (ret)
1595 		goto out;
1596 
1597 	resize_hpt_pivot(resize);
1598 
1599 out:
1600 	/* Let VCPUs run again */
1601 	kvm->arch.mmu_ready = 1;
1602 	smp_mb();
1603 out_no_hpt:
1604 	resize_hpt_release(kvm, resize);
1605 	mutex_unlock(&kvm->lock);
1606 	return ret;
1607 }
1608 
1609 /*
1610  * Functions for reading and writing the hash table via reads and
1611  * writes on a file descriptor.
1612  *
1613  * Reads return the guest view of the hash table, which has to be
1614  * pieced together from the real hash table and the guest_rpte
1615  * values in the revmap array.
1616  *
1617  * On writes, each HPTE written is considered in turn, and if it
1618  * is valid, it is written to the HPT as if an H_ENTER with the
1619  * exact flag set was done.  When the invalid count is non-zero
1620  * in the header written to the stream, the kernel will make
1621  * sure that that many HPTEs are invalid, and invalidate them
1622  * if not.
1623  */
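/*
 * A sketch of the stream format, assuming a single record describing
 * two valid entries followed by three invalid ones:
 *
 *	struct kvm_get_htab_header { index, n_valid = 2, n_invalid = 3 }
 *	two dwords (v, r) for the first valid HPTE
 *	two dwords (v, r) for the second valid HPTE
 *
 * i.e. each header is followed by n_valid HPTEs of HPTE_SIZE bytes
 * each; invalid entries are only counted, never written out.
 */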
1624 
1625 struct kvm_htab_ctx {
1626 	unsigned long	index;
1627 	unsigned long	flags;
1628 	struct kvm	*kvm;
1629 	int		first_pass;
1630 };
1631 
1632 #define HPTE_SIZE	(2 * sizeof(unsigned long))
1633 
1634 /*
1635  * Returns 1 if this HPT entry has been modified or has pending
1636  * R/C bit changes.
1637  */
1638 static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
1639 {
1640 	unsigned long rcbits_unset;
1641 
1642 	if (revp->guest_rpte & HPTE_GR_MODIFIED)
1643 		return 1;
1644 
1645 	/* Also need to consider changes in reference and changed bits */
1646 	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1647 	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
1648 	    (be64_to_cpu(hptp[1]) & rcbits_unset))
1649 		return 1;
1650 
1651 	return 0;
1652 }
1653 
1654 static long record_hpte(unsigned long flags, __be64 *hptp,
1655 			unsigned long *hpte, struct revmap_entry *revp,
1656 			int want_valid, int first_pass)
1657 {
1658 	unsigned long v, r, hr;
1659 	unsigned long rcbits_unset;
1660 	int ok = 1;
1661 	int valid, dirty;
1662 
1663 	/* Unmodified entries are uninteresting except on the first pass */
1664 	dirty = hpte_dirty(revp, hptp);
1665 	if (!first_pass && !dirty)
1666 		return 0;
1667 
1668 	valid = 0;
1669 	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1670 		valid = 1;
1671 		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
1672 		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
1673 			valid = 0;
1674 	}
1675 	if (valid != want_valid)
1676 		return 0;
1677 
1678 	v = r = 0;
1679 	if (valid || dirty) {
1680 		/* lock the HPTE so it's stable and read it */
1681 		preempt_disable();
1682 		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1683 			cpu_relax();
1684 		v = be64_to_cpu(hptp[0]);
1685 		hr = be64_to_cpu(hptp[1]);
1686 		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1687 			v = hpte_new_to_old_v(v, hr);
1688 			hr = hpte_new_to_old_r(hr);
1689 		}
1690 
1691 		/* re-evaluate valid and dirty from synchronized HPTE value */
1692 		valid = !!(v & HPTE_V_VALID);
1693 		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
1694 
1695 		/* Harvest R and C into guest view if necessary */
1696 		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1697 		if (valid && (rcbits_unset & hr)) {
1698 			revp->guest_rpte |= (hr &
1699 				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
1700 			dirty = 1;
1701 		}
1702 
1703 		if (v & HPTE_V_ABSENT) {
1704 			v &= ~HPTE_V_ABSENT;
1705 			v |= HPTE_V_VALID;
1706 			valid = 1;
1707 		}
1708 		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
1709 			valid = 0;
1710 
1711 		r = revp->guest_rpte;
1712 		/* only clear modified if this is the right sort of entry */
1713 		if (valid == want_valid && dirty) {
1714 			r &= ~HPTE_GR_MODIFIED;
1715 			revp->guest_rpte = r;
1716 		}
1717 		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
1718 		preempt_enable();
1719 		if (!(valid == want_valid && (first_pass || dirty)))
1720 			ok = 0;
1721 	}
1722 	hpte[0] = cpu_to_be64(v);
1723 	hpte[1] = cpu_to_be64(r);
1724 	return ok;
1725 }
1726 
1727 static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1728 			     size_t count, loff_t *ppos)
1729 {
1730 	struct kvm_htab_ctx *ctx = file->private_data;
1731 	struct kvm *kvm = ctx->kvm;
1732 	struct kvm_get_htab_header hdr;
1733 	__be64 *hptp;
1734 	struct revmap_entry *revp;
1735 	unsigned long i, nb, nw;
1736 	unsigned long __user *lbuf;
1737 	struct kvm_get_htab_header __user *hptr;
1738 	unsigned long flags;
1739 	int first_pass;
1740 	unsigned long hpte[2];
1741 
1742 	if (!access_ok(VERIFY_WRITE, buf, count))
1743 		return -EFAULT;
1744 	if (kvm_is_radix(kvm))
1745 		return 0;
1746 
1747 	first_pass = ctx->first_pass;
1748 	flags = ctx->flags;
1749 
1750 	i = ctx->index;
1751 	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
1752 	revp = kvm->arch.hpt.rev + i;
1753 	lbuf = (unsigned long __user *)buf;
1754 
1755 	nb = 0;
1756 	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
1757 		/* Initialize header */
1758 		hptr = (struct kvm_get_htab_header __user *)buf;
1759 		hdr.n_valid = 0;
1760 		hdr.n_invalid = 0;
1761 		nw = nb;
1762 		nb += sizeof(hdr);
1763 		lbuf = (unsigned long __user *)(buf + sizeof(hdr));
1764 
1765 		/* Skip uninteresting (i.e. clean) entries on passes after the first */
1766 		if (!first_pass) {
1767 			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1768 			       !hpte_dirty(revp, hptp)) {
1769 				++i;
1770 				hptp += 2;
1771 				++revp;
1772 			}
1773 		}
1774 		hdr.index = i;
1775 
1776 		/* Grab a series of valid entries */
1777 		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1778 		       hdr.n_valid < 0xffff &&
1779 		       nb + HPTE_SIZE < count &&
1780 		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
1781 			/* valid entry, write it out */
1782 			++hdr.n_valid;
1783 			if (__put_user(hpte[0], lbuf) ||
1784 			    __put_user(hpte[1], lbuf + 1))
1785 				return -EFAULT;
1786 			nb += HPTE_SIZE;
1787 			lbuf += 2;
1788 			++i;
1789 			hptp += 2;
1790 			++revp;
1791 		}
1792 		/* Now skip invalid entries while we can */
1793 		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1794 		       hdr.n_invalid < 0xffff &&
1795 		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
1796 			/* found an invalid entry */
1797 			++hdr.n_invalid;
1798 			++i;
1799 			hptp += 2;
1800 			++revp;
1801 		}
1802 
1803 		if (hdr.n_valid || hdr.n_invalid) {
1804 			/* write back the header */
1805 			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
1806 				return -EFAULT;
1807 			nw = nb;
1808 			buf = (char __user *)lbuf;
1809 		} else {
1810 			nb = nw;
1811 		}
1812 
1813 		/* Check if we've wrapped around the hash table */
1814 		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
1815 			i = 0;
1816 			ctx->first_pass = 0;
1817 			break;
1818 		}
1819 	}
1820 
1821 	ctx->index = i;
1822 
1823 	return nb;
1824 }
1825 
1826 static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1827 			      size_t count, loff_t *ppos)
1828 {
1829 	struct kvm_htab_ctx *ctx = file->private_data;
1830 	struct kvm *kvm = ctx->kvm;
1831 	struct kvm_get_htab_header hdr;
1832 	unsigned long i, j;
1833 	unsigned long v, r;
1834 	unsigned long __user *lbuf;
1835 	__be64 *hptp;
1836 	unsigned long tmp[2];
1837 	ssize_t nb;
1838 	long int err, ret;
1839 	int mmu_ready;
1840 	int pshift;
1841 
1842 	if (!access_ok(VERIFY_READ, buf, count))
1843 		return -EFAULT;
1844 	if (kvm_is_radix(kvm))
1845 		return -EINVAL;
1846 
1847 	/* lock out vcpus from running while we're doing this */
1848 	mutex_lock(&kvm->lock);
1849 	mmu_ready = kvm->arch.mmu_ready;
1850 	if (mmu_ready) {
1851 		kvm->arch.mmu_ready = 0;	/* temporarily */
1852 		/* order mmu_ready vs. vcpus_running */
1853 		smp_mb();
1854 		if (atomic_read(&kvm->arch.vcpus_running)) {
1855 			kvm->arch.mmu_ready = 1;
1856 			mutex_unlock(&kvm->lock);
1857 			return -EBUSY;
1858 		}
1859 	}
1860 
1861 	err = 0;
1862 	for (nb = 0; nb + sizeof(hdr) <= count; ) {
1863 		err = -EFAULT;
1864 		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
1865 			break;
1866 
1867 		err = 0;
1868 		if (nb + hdr.n_valid * HPTE_SIZE > count)
1869 			break;
1870 
1871 		nb += sizeof(hdr);
1872 		buf += sizeof(hdr);
1873 
1874 		err = -EINVAL;
1875 		i = hdr.index;
1876 		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
1877 		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
1878 			break;
1879 
1880 		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
1881 		lbuf = (unsigned long __user *)buf;
1882 		for (j = 0; j < hdr.n_valid; ++j) {
1883 			__be64 hpte_v;
1884 			__be64 hpte_r;
1885 
1886 			err = -EFAULT;
1887 			if (__get_user(hpte_v, lbuf) ||
1888 			    __get_user(hpte_r, lbuf + 1))
1889 				goto out;
1890 			v = be64_to_cpu(hpte_v);
1891 			r = be64_to_cpu(hpte_r);
1892 			err = -EINVAL;
1893 			if (!(v & HPTE_V_VALID))
1894 				goto out;
1895 			pshift = kvmppc_hpte_base_page_shift(v, r);
1896 			if (pshift <= 0)
1897 				goto out;
1898 			lbuf += 2;
1899 			nb += HPTE_SIZE;
1900 
1901 			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1902 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1903 			err = -EIO;
1904 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
1905 							 tmp);
1906 			if (ret != H_SUCCESS) {
1907 				pr_err("kvm_htab_write ret %ld i=%ld v=%lx r=%lx\n",
1908 				       ret, i, v, r);
1909 				goto out;
1910 			}
1911 			if (!mmu_ready && is_vrma_hpte(v)) {
1912 				unsigned long senc, lpcr;
1913 
1914 				senc = slb_pgsize_encoding(1ul << pshift);
1915 				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1916 					(VRMA_VSID << SLB_VSID_SHIFT_1T);
1917 				if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
1918 					lpcr = senc << (LPCR_VRMASD_SH - 4);
1919 					kvmppc_update_lpcr(kvm, lpcr,
1920 							   LPCR_VRMASD);
1921 				} else {
1922 					kvmppc_setup_partition_table(kvm);
1923 				}
1924 				mmu_ready = 1;
1925 			}
1926 			++i;
1927 			hptp += 2;
1928 		}
1929 
1930 		for (j = 0; j < hdr.n_invalid; ++j) {
1931 			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1932 				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1933 			++i;
1934 			hptp += 2;
1935 		}
1936 		err = 0;
1937 	}
1938 
1939  out:
1940 	/* Order HPTE updates vs. mmu_ready */
1941 	smp_wmb();
1942 	kvm->arch.mmu_ready = mmu_ready;
1943 	mutex_unlock(&kvm->lock);
1944 
1945 	if (err)
1946 		return err;
1947 	return nb;
1948 }
1949 
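/*
 * Releasing the htab fd drops its reference on the VM and, for read-only
 * (dump) fds, drops the hpte_mod_interest count taken in
 * kvm_vm_ioctl_get_htab_fd() so the HPTE update paths can stop flagging
 * modified entries.
 */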
1950 static int kvm_htab_release(struct inode *inode, struct file *filp)
1951 {
1952 	struct kvm_htab_ctx *ctx = filp->private_data;
1953 
1954 	filp->private_data = NULL;
1955 	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
1956 		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
1957 	kvm_put_kvm(ctx->kvm);
1958 	kfree(ctx);
1959 	return 0;
1960 }
1961 
1962 static const struct file_operations kvm_htab_fops = {
1963 	.read		= kvm_htab_read,
1964 	.write		= kvm_htab_write,
1965 	.llseek		= default_llseek,
1966 	.release	= kvm_htab_release,
1967 };
1968 
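/*
 * Create an anonymous file descriptor, backed by kvm_htab_fops, through
 * which userspace can stream the guest hash page table out (O_RDONLY) or
 * back in (O_WRONLY).  A read-only fd also bumps hpte_mod_interest so that
 * guest HPT updates made via the hypercall paths are marked modified and
 * show up on subsequent (incremental) reads.
 */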
1969 int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
1970 {
1971 	int ret;
1972 	struct kvm_htab_ctx *ctx;
1973 	int rwflag;
1974 
1975 	/* reject flags we don't recognize */
1976 	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
1977 		return -EINVAL;
1978 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1979 	if (!ctx)
1980 		return -ENOMEM;
1981 	kvm_get_kvm(kvm);
1982 	ctx->kvm = kvm;
1983 	ctx->index = ghf->start_index;
1984 	ctx->flags = ghf->flags;
1985 	ctx->first_pass = 1;
1986 
1987 	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
1988 	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
1989 	if (ret < 0) {
1990 		kfree(ctx);
1991 		kvm_put_kvm(kvm);
1992 		return ret;
1993 	}
1994 
1995 	if (rwflag == O_RDONLY) {
1996 		mutex_lock(&kvm->slots_lock);
1997 		atomic_inc(&kvm->arch.hpte_mod_interest);
1998 		/* make sure kvmppc_do_h_enter etc. see the increment */
1999 		synchronize_srcu_expedited(&kvm->srcu);
2000 		mutex_unlock(&kvm->slots_lock);
2001 	}
2002 
2003 	return ret;
2004 }
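
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * one way a VMM that already holds an open VM fd ("vm_fd" below) and the
 * usual <linux/kvm.h>/<sys/ioctl.h> includes might stream the guest HPT
 * out through this fd, e.g. for live migration.  "send_to_destination" is
 * a hypothetical helper.  The first pass over the table reports every
 * entry (or only bolted ones with KVM_GET_HTAB_BOLTED_ONLY); later reads
 * return only entries dirtied since they were last read.
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	char chunk[65536];
 *	ssize_t n;
 *	int htab_fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *
 *	if (htab_fd < 0)
 *		return -errno;
 *	while ((n = read(htab_fd, chunk, sizeof(chunk))) > 0)
 *		send_to_destination(chunk, n);
 *	close(htab_fd);
 */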
2005 
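/*
 * Per-open state for the debugfs "htab" dump file.  hpt_index is the next
 * HPTE to format; buf, buf_index and chars_left carry any part of a
 * formatted line that did not fit in the user buffer on the previous read,
 * so the next read resumes where it left off.
 */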
2006 struct debugfs_htab_state {
2007 	struct kvm	*kvm;
2008 	struct mutex	mutex;
2009 	unsigned long	hpt_index;
2010 	int		chars_left;
2011 	int		buf_index;
2012 	char		buf[64];
2013 };
2014 
2015 static int debugfs_htab_open(struct inode *inode, struct file *file)
2016 {
2017 	struct kvm *kvm = inode->i_private;
2018 	struct debugfs_htab_state *p;
2019 
2020 	p = kzalloc(sizeof(*p), GFP_KERNEL);
2021 	if (!p)
2022 		return -ENOMEM;
2023 
2024 	kvm_get_kvm(kvm);
2025 	p->kvm = kvm;
2026 	mutex_init(&p->mutex);
2027 	file->private_data = p;
2028 
2029 	return nonseekable_open(inode, file);
2030 }
2031 
2032 static int debugfs_htab_release(struct inode *inode, struct file *file)
2033 {
2034 	struct debugfs_htab_state *p = file->private_data;
2035 
2036 	kvm_put_kvm(p->kvm);
2037 	kfree(p);
2038 	return 0;
2039 }
2040 
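/*
 * Format one line per valid (or absent) HPTE as "index V R guest_rpte" in
 * hex, locking each HPTE while it is read so that the two doublewords and
 * the reverse-map entry stay consistent with each other.
 */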
2041 static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
2042 				 size_t len, loff_t *ppos)
2043 {
2044 	struct debugfs_htab_state *p = file->private_data;
2045 	ssize_t ret, r;
2046 	unsigned long i, n;
2047 	unsigned long v, hr, gr;
2048 	struct kvm *kvm;
2049 	__be64 *hptp;
2050 
2051 	kvm = p->kvm;
2052 	if (kvm_is_radix(kvm))
2053 		return 0;	/* radix guests have no HPT to dump */
2054 
2055 	ret = mutex_lock_interruptible(&p->mutex);
2056 	if (ret)
2057 		return ret;
2058 
2059 	if (p->chars_left) {
2060 		n = p->chars_left;
2061 		if (n > len)
2062 			n = len;
2063 		r = copy_to_user(buf, p->buf + p->buf_index, n);
2064 		n -= r;
2065 		p->chars_left -= n;
2066 		p->buf_index += n;
2067 		buf += n;
2068 		len -= n;
2069 		ret = n;
2070 		if (r) {
2071 			if (!n)
2072 				ret = -EFAULT;
2073 			goto out;
2074 		}
2075 	}
2076 
2077 	i = p->hpt_index;
2078 	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
2079 	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
2080 	     ++i, hptp += 2) {
2081 		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
2082 			continue;
2083 
2084 		/* lock the HPTE so it's stable and read it */
2085 		preempt_disable();
2086 		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
2087 			cpu_relax();
2088 		v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
2089 		hr = be64_to_cpu(hptp[1]);
2090 		gr = kvm->arch.hpt.rev[i].guest_rpte;
2091 		unlock_hpte(hptp, v);
2092 		preempt_enable();
2093 
2094 		if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
2095 			continue;
2096 
2097 		n = scnprintf(p->buf, sizeof(p->buf),
2098 			      "%6lx %.16lx %.16lx %.16lx\n",
2099 			      i, v, hr, gr);
2100 		p->chars_left = n;
2101 		if (n > len)
2102 			n = len;
2103 		r = copy_to_user(buf, p->buf, n);
2104 		n -= r;
2105 		p->chars_left -= n;
2106 		p->buf_index = n;
2107 		buf += n;
2108 		len -= n;
2109 		ret += n;
2110 		if (r) {
2111 			if (!ret)
2112 				ret = -EFAULT;
2113 			goto out;
2114 		}
2115 	}
2116 	p->hpt_index = i;
2117 
2118  out:
2119 	mutex_unlock(&p->mutex);
2120 	return ret;
2121 }
2122 
2123 static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
2124 			   size_t len, loff_t *ppos)
2125 {
2126 	return -EACCES;	/* the debugfs htab file is read-only */
2127 }
2128 
2129 static const struct file_operations debugfs_htab_fops = {
2130 	.owner	 = THIS_MODULE,
2131 	.open	 = debugfs_htab_open,
2132 	.release = debugfs_htab_release,
2133 	.read	 = debugfs_htab_read,
2134 	.write	 = debugfs_htab_write,
2135 	.llseek	 = generic_file_llseek,
2136 };
2137 
2138 void kvmppc_mmu_debugfs_init(struct kvm *kvm)
2139 {
2140 	kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
2141 						    kvm->arch.debugfs_dir, kvm,
2142 						    &debugfs_htab_fops);
2143 }
2144 
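/*
 * Wire up the Book3S HV MMU callbacks for a vcpu: guest translations are
 * resolved through the hash page table, and BOOK3S_HFLAG_SLB records that
 * this vcpu has an SLB (32 entries, as on POWER7/POWER8).
 */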
2145 void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
2146 {
2147 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
2148 
2149 	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */
2150 
2151 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
2152 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
2153 
2154 	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
2155 }
2156