/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* The Power architecture requires the HPT to be at least 256kB */
#define PPC_MIN_HPT_ORDER	18

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt;
	struct revmap_entry *rev;
	struct kvmppc_linear_info *li;
	long order = kvm_hpt_order;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	/*
	 * If the user wants a different size from the default,
	 * try first to allocate it from the kernel page allocator.
	 */
	hpt = 0;
	if (order != kvm_hpt_order) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	/* Next try to allocate from the preallocated pool */
	if (!hpt) {
		li = kvm_alloc_hpt();
		if (li) {
			hpt = (ulong)li->base_virt;
			kvm->arch.hpt_li = li;
			order = kvm_hpt_order;
		}
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
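	/*
	 * For example, at the minimum order of 18 (a 256kB HPT) the
	 * lines above give 2^14 = 16384 HPTEs in 2^11 = 2048 HPTEGs,
	 * so hpt_mask (the hash mask over groups) is 0x7ff.
	 */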

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
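	/*
	 * SDR1 holds the physical base of the HPT in its upper bits and
	 * HTABSIZE in its low bits; HTABSIZE encodes the table size as
	 * 2^(18 + HTABSIZE) bytes, hence the (order - 18) below.
	 */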
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

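/*
 * Reset-or-allocate path: if an HPT already exists and no vcpus are
 * currently running, clear it in place rather than reallocating;
 * otherwise fall back to kvmppc_alloc_hpt().  (This is presumably
 * reached from a userspace request such as the KVM_PPC_ALLOCATE_HTAB
 * ioctl; the caller is not visible in this file.)
 */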
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Set the whole last_vcpu array to an invalid vcpu number.
		 * This ensures that each vcpu will flush its TLB on next entry.
		 */
		memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_li)
		kvm_release_hpt(kvm->arch.hpt_li);
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}
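
/*
 * For the three page sizes this file deals with, the two helpers above
 * work out to: 4k -> neither bit; 64k -> HPTE_V_LARGE in dword 0 plus
 * 0x1000 in dword 1; 16M -> HPTE_V_LARGE alone.
 */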

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

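	/*
	 * Create one bolted HPTE per pgsize chunk of the VRMA.  The hash
	 * below mirrors the architected 1T-segment hash function,
	 * vsid ^ (vsid << 25) ^ page-index, written out by hand because
	 * the VRMA's virtual address does not fit in 64 bits.
	 */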
	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = kvm->arch.slot_phys[memslot->id];
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
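	/*
	 * Each slot_phys entry packs the page's physical address with
	 * flags in the low (sub-page) bits: KVMPPC_GOT_PAGE marks the
	 * one entry that holds the struct page reference, is_io carries
	 * the cache bits, and pgorder is the log2 of the number of
	 * small pages covered.
	 */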
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	vcpu->arch.pgdir = current->mm->pgd;
	ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

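	/*
	 * e.g. for a 64k page, ra_mask below is 0xffff: the real address
	 * is the RPN's high-order bits combined with the low 16 bits of
	 * the effective address.
	 */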
	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0)
		return -ENOENT;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

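	/*
	 * In the D-form major opcodes the store variants have bit
	 * 0x10000000 (opcode bit 2) set, e.g. stw (36) vs lwz (32).
	 * For X-form (major opcode 31) the distinction moves into the
	 * extended opcode field, bit 0x100, e.g. stwx (XO 151) vs
	 * lwzx (XO 23).
	 */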
	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible. It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gfn = hpte_rpn(r, psize);
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
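	/*
	 * The read of mmu_notifier_seq above must happen before we look
	 * up and map the page; it pairs with the mmu_notifier_retry()
	 * check made further down, after taking the rmap lock.  If an
	 * invalidation has run (or is running) in between, we throw the
	 * translation away and let the guest retry the access.
	 */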

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, the HPTE must allow writing for us to get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, NULL);
			if (ptep && pte_present(*ptep)) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
		pfn = page_to_pfn(page);
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/* Set the HPTE to point to pfn */
	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	rmap = &memslot->rmap[gfn - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page, because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup().
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

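	/*
	 * Each rmap word packs a lock bit, a present flag, the index of
	 * the first HPTE mapping this page (the rest of the chain is
	 * linked through rev[].forw/back), and referenced/changed bits
	 * accumulated at KVMPPC_RMAP_RC_SHIFT from HPTEs that have been
	 * torn down.
	 */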
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			rev[i].guest_rpte = ptel | rcbits;
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			rev[i].guest_rpte |= HPTE_R_R;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		if (!(hptep[1] & HPTE_R_C))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

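		/*
		 * Below, C is cleared only after the HPTE is made absent
		 * and its TLB entry invalidated, so that no guest store
		 * can dirty the page through a stale translation while
		 * the bit is clear.
		 */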
		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
			/* need to make it temporarily absent to clear C */
			hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			hptep[1] &= ~HPTE_R_C;
			eieio();
			hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
			rev[i].guest_rpte |= HPTE_R_C;
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	unsigned long i;
	unsigned long *rmapp, *map;

	preempt_disable();
	rmapp = memslot->rmap;
	map = memslot->dirty_bitmap;
	for (i = 0; i < memslot->npages; ++i) {
		if (kvm_test_clear_dirty(kvm, rmapp))
			__set_bit_le(i, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, psize, offset;
	unsigned long pa;
	unsigned long *physp;

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return NULL;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = kvm->arch.slot_phys[memslot->id];
		if (!physp)
			return NULL;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				return NULL;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			return NULL;
		page = pages[0];
	}
	psize = PAGE_SIZE;
	if (PageHuge(page)) {
		page = compound_head(page);
		psize <<= compound_order(page);
	}
	offset = gpa & (psize - 1);
	if (nb_ret)
		*nb_ret = psize - offset;
	return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
	struct page *page = virt_to_page(va);

	put_page(page);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}