// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/pte-walk.h>

#include "book3s.h"
#include "book3s_hv.h"
#include "trace_hv.h"

//#define DEBUG_RESIZE_HPT	1

#ifdef DEBUG_RESIZE_HPT
#define resize_hpt_debug(resize, ...)				\
	do {							\
		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
		printk(__VA_ARGS__);				\
	} while (0)
#else
#define resize_hpt_debug(resize, ...)				\
	do { } while (0)
#endif

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);

struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;
	struct work_struct work;
	u32 order;

	/* These fields protected by kvm->arch.mmu_setup_lock */

	/* Possible values and their usage:
	 *  <0     an error occurred during allocation,
	 *  -EBUSY allocation is in progress,
	 *  0      allocation made successfully.
	 */
	int error;

	/* Private to the work thread, until error != -EBUSY,
	 * then protected by kvm->arch.mmu_setup_lock.
	 */
	struct kvm_hpt_info hpt;
};

int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
{
	unsigned long hpt = 0;
	int cma = 0;
	struct page *page = NULL;
	struct revmap_entry *rev;
	unsigned long npte;

	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
		return -EINVAL;

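	/* Try the CMA reserve first; fall back to the page allocator below. */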
	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1ul << order));
		cma = 1;
	}

	if (!hpt)
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
				       |__GFP_NOWARN, order - PAGE_SHIFT);

	if (!hpt)
		return -ENOMEM;

	/* HPTEs are 2**4 bytes long */
	npte = 1ul << (order - 4);

	/* Allocate reverse map array */
	rev = vmalloc(array_size(npte, sizeof(struct revmap_entry)));
	if (!rev) {
		if (cma)
			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
		else
			free_pages(hpt, order - PAGE_SHIFT);
		return -ENOMEM;
	}

	info->order = order;
	info->virt = hpt;
	info->cma = cma;
	info->rev = rev;

	return 0;
}

void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
{
	atomic64_set(&kvm->arch.mmio_update, 0);
	kvm->arch.hpt = *info;
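	/*
	 * The HTABSIZE field of SDR1 is log2(HPT size in bytes) - 18,
	 * since the minimum HPT size is 2^18 = 256kB.
	 */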
	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);

	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
		 info->virt, (long)info->order, kvm->arch.lpid);
}

int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
{
	int err = -EBUSY;
	struct kvm_hpt_info info;

	mutex_lock(&kvm->arch.mmu_setup_lock);
	if (kvm->arch.mmu_ready) {
		kvm->arch.mmu_ready = 0;
		/* order mmu_ready vs. vcpus_running */
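		/* (pairs with the barrier taken after incrementing vcpus_running on the vcpu run path) */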
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.mmu_ready = 1;
			goto out;
		}
	}
	if (kvm_is_radix(kvm)) {
		err = kvmppc_switch_mmu_to_hpt(kvm);
		if (err)
			goto out;
	}

	if (kvm->arch.hpt.order == order) {
		/* We already have a suitable HPT */

		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		err = 0;
		goto out;
	}

	if (kvm->arch.hpt.virt) {
		kvmppc_free_hpt(&kvm->arch.hpt);
		kvmppc_rmap_reset(kvm);
	}

	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		goto out;
	kvmppc_set_hpt(kvm, &info);

out:
	if (err == 0)
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);

	mutex_unlock(&kvm->arch.mmu_setup_lock);
	return err;
}

void kvmppc_free_hpt(struct kvm_hpt_info *info)
{
	vfree(info->rev);
	info->rev = NULL;
	if (info->cma)
		kvm_free_hpt_cma(virt_to_page((void *)info->virt),
				 1 << (info->order - PAGE_SHIFT));
	else if (info->virt)
		free_pages(info->virt, info->order - PAGE_SHIFT);
	info->virt = 0;
	info->order = 0;
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

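	/*
	 * Precompute the two HPTE dword templates: hp0 gets the 1TB-segment
	 * flag, the VRMA VSID and the bolted bit; hp1 gets the page-size
	 * encoding plus the R, C, M and PP bits.
	 */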
	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long nr_lpids;

	if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
		return -EINVAL;

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (WARN_ON(mfspr(SPRN_LPID) != 0))
			return -EINVAL;
		nr_lpids = 1UL << mmu_lpid_bits;
	} else {
		nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT;
	}

	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */
		if (cpu_has_feature(CPU_FTR_ARCH_207S))
			WARN_ON(nr_lpids != 1UL << 12);
		else
			WARN_ON(nr_lpids != 1UL << 10);

		/*
		 * Reserve the last implemented LPID for use in partition
		 * switching on POWER7 and POWER8.
		 */
		nr_lpids -= 1;
	}

	kvmppc_init_lpid(nr_lpids);

	return 0;
}

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	long ret;

	preempt_disable();
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				kvm->mm->pgd, false, pte_idx_ret);
	preempt_enable();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = kvmppc_actual_pgsz(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, orig_v, gr;
	__be64 *hptep;
	long int index;
	int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);

	if (kvm_is_radix(vcpu->kvm))
		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
	gr = kvm->arch.hpt.rev[index].guest_rpte;

	unlock_hpte(hptep, orig_v);
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(ppc_inst_t instr)
{
	unsigned int mask;
	unsigned int suffix;

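	/*
	 * For D/DS-form loads and stores, opcode bit 0x10000000 is set for
	 * stores and clear for loads; for major opcode 31 (X-form) the
	 * distinguishing bit is 0x100 in the extended opcode instead.
	 */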
	mask = 0x10000000;
	suffix = ppc_inst_val(instr);
	if (ppc_inst_prefixed(instr))
		suffix = ppc_inst_suffix(instr);
	else if ((suffix & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (suffix & mask) != 0;
}

int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
{
	ppc_inst_t last_inst;
	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);

	/*
	 * Fast path - check if the guest physical address corresponds to a
	 * device on the FAST_MMIO_BUS, if so we can avoid loading the
	 * instruction altogether, then we can just handle it and return.
	 */
	if (is_store) {
		int idx, ret;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
				       NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
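			/* Advance past the instruction: prefixed instructions are 8 bytes, all others 4. */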
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
			return RESUME_GUEST;
		}
	}

	/*
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
		EMULATE_DONE)
		return RESUME_GUEST;

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.
	 *
	 * If the fault is prefixed but the instruction is not or vice
	 * versa, try again so that we don't advance pc the wrong amount.
	 */
	if (ppc_inst_prefixed(last_inst) != is_prefixed)
		return RESUME_GUEST;

	/*
	 * If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later. The
	 * translation could be invalidated in the meantime in which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible. It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long hpte[3], r;
	unsigned long hnow_v, hnow_r;
	__be64 *hptep;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn, hpa;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page;
	long index, ret;
	bool is_ci;
	bool writing, write_ok;
	unsigned int shift;
	unsigned long rcbits;
	long mmio_update;
	pte_t pte, *ptep;

	if (kvm_is_radix(kvm))
		return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;

	if (vcpu->arch.pgfault_cache) {
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
			r = vcpu->arch.pgfault_cache->rpte;
			psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0],
						   r);
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
			return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
						dsisr & DSISR_ISSTORE);
		}
	}
	index = vcpu->arch.pgfault_index;
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	rev = &kvm->arch.hpt.rev[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hpte[1] = be64_to_cpu(hptep[1]);
	hpte[2] = r = rev->guest_rpte;
	unlock_hpte(hptep, hpte[0]);
	preempt_enable();

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
		hpte[1] = hpte_new_to_old_r(hpte[1]);
	}
	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = kvmppc_actual_pgsz(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
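	/* (the smp_rmb below orders the mmu_invalidate_seq read before the host page-table walk) */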
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	ret = -EFAULT;
	page = NULL;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		write_ok = true;
	} else {
		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
					   writing, &write_ok, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	pte = __pte(0);
	if (ptep)
		pte = READ_ONCE(*ptep);
	spin_unlock(&kvm->mmu_lock);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!pte_present(pte)) {
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	hpa = pte_pfn(pte) << PAGE_SHIFT;
	pte_size = PAGE_SIZE;
	if (shift)
		pte_size = 1ul << shift;
	is_ci = pte_ci(pte);

	if (psize > pte_size)
		goto out_put;
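	/* Host page is larger than the guest page: select the matching sub-page within it. */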
	if (pte_size > psize)
		hpa |= hva & (pte_size - psize);

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_ci)) {
		if (is_ci)
			goto out_put;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to hpa.
	 * Since the hpa is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hnow_v = be64_to_cpu(hptep[0]);
	hnow_r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
		hnow_r = hpte_new_to_old_r(hnow_r);
	}

	/*
	 * If the HPT is being resized, don't update the HPTE,
	 * instead let the guest retry after the resize operation is complete.
	 * The synchronization for mmu_ready test vs. set is provided
	 * by the HPTE lock.
	 */
	if (!kvm->arch.mmu_ready)
		goto out_unlock;

	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = hpte_old_to_new_r(hpte[0], r);
		hpte[0] = hpte_old_to_new_v(hpte[0]);
	}
	hptep[1] = cpu_to_be64(r);
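	/* Order the write of the second HPTE dword before __unlock_hpte() writes the first (valid) dword. */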
	eieio();
	__unlock_hpte(hptep, hpte[0]);
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		set_page_dirty_lock(page);

 out_put:
	trace_kvm_page_fault_exit(vcpu, hpte, ret);

	if (page)
		put_page(page);
	return ret;

 out_unlock:
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	preempt_enable();
	goto out_put;
}

void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, bkt, slots) {
		/* Mutual exclusion with kvm_unmap_hva_range etc. */
		spin_lock(&kvm->mmu_lock);
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
		spin_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
			      struct kvm_memory_slot *memslot,
			      unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long j, h;
	unsigned long ptel, psize, rcbits;

	j = rev[i].forw;
	if (j == i) {
		/* chain is now empty */
		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
	} else {
		/* remove i from chain */
		h = rev[i].back;
		rev[h].forw = j;
		rev[j].back = h;
		rev[i].forw = rev[i].back = i;
		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
	}

	/* Now check and modify the HPTE */
	ptel = rev[i].guest_rpte;
	psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel);
	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
	    hpte_rpn(ptel, psize) == gfn) {
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
		/* Harvest R and C */
		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
		if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap)
			kvmppc_update_dirty_map(memslot, gfn, psize);
		if (rcbits & ~rev[i].guest_rpte) {
			rev[i].guest_rpte = ptel | rcbits;
			note_hpte_modification(kvm, &rev[i]);
		}
	}
}

static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			    unsigned long gfn)
{
	unsigned long i;
	__be64 *hptep;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}

		kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn);
		unlock_rmap(rmapp);
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	}
}

bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
	gfn_t gfn;

	if (kvm_is_radix(kvm)) {
		for (gfn = range->start; gfn < range->end; gfn++)
			kvm_unmap_radix(kvm, range->slot, gfn);
	} else {
		for (gfn = range->start; gfn < range->end; gfn++)
			kvm_unmap_rmapp(kvm, range->slot, gfn);
	}

	return false;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long gfn;
	unsigned long n;
	unsigned long *rmapp;

	gfn = memslot->base_gfn;
	rmapp = memslot->arch.rmap;
	if (kvm_is_radix(kvm)) {
		kvmppc_radix_flush_memslot(kvm, memslot);
		return;
	}

	for (n = memslot->npages; n; --n, ++gfn) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, memslot, gfn);
		++rmapp;
	}
}

static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	__be64 *hptep;
	bool ret = false;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = true;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = true;
		}
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
	gfn_t gfn;
	bool ret = false;

	if (kvm_is_radix(kvm)) {
		for (gfn = range->start; gfn < range->end; gfn++)
			ret |= kvm_age_radix(kvm, range->slot, gfn);
	} else {
		for (gfn = range->start; gfn < range->end; gfn++)
			ret |= kvm_age_rmapp(kvm, range->slot, gfn);
	}

	return ret;
}

static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			       unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long *hp;
	bool ret = true;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return true;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
			j = rev[i].forw;
			if (be64_to_cpu(hp[1]) & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = false;

 out:
	unlock_rmap(rmapp);
	return ret;
}

bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
	WARN_ON(range->start + 1 != range->end);

	if (kvm_is_radix(kvm))
		return kvm_test_age_radix(kvm, range->slot, range->start);
	else
		return kvm_test_age_rmapp(kvm, range->slot, range->start);
}

bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
	WARN_ON(range->start + 1 != range->end);

	if (kvm_is_radix(kvm))
		kvm_unmap_radix(kvm, range->slot, range->start);
	else
		kvm_unmap_rmapp(kvm, range->slot, range->start);

	return false;
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	__be64 *hptep;
	int npages_dirty = 0;

 retry:
	lock_rmap(rmapp);
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		unsigned long hptep1;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		hptep1 = be64_to_cpu(hptep[1]);
		if (!(hptep1 & HPTE_R_C) &&
		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = be64_to_cpu(hptep[0]);
		r = be64_to_cpu(hptep[1]);
		if (r & HPTE_R_C) {
			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = kvmppc_actual_pgsz(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
11046c576e74SPaul Mackerras eieio();
110582ed3616SPaul Mackerras }
1106a4bd6eb0SAneesh Kumar K.V v &= ~HPTE_V_ABSENT;
11076c576e74SPaul Mackerras v |= HPTE_V_VALID;
1108a4bd6eb0SAneesh Kumar K.V __unlock_hpte(hptep, v);
110982ed3616SPaul Mackerras } while ((i = j) != head);
111082ed3616SPaul Mackerras
111182ed3616SPaul Mackerras unlock_rmap(rmapp);
1112687414beSAlexey Kardashevskiy return npages_dirty;
111382ed3616SPaul Mackerras }
111482ed3616SPaul Mackerras
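/*
 * Worked example of the count above (illustrative; a 64K system page
 * size is an assumption): for a 16 MB huge-page HPTE,
 * kvmppc_actual_pgsz() returns 16 MB, so
 *
 *	n = (16 MB + PAGE_SIZE - 1) >> PAGE_SHIFT = 256
 *
 * and the caller marks 256 consecutive dirty-bitmap bits.
 */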
11158f7b79b8SPaul Mackerras void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
1116c35635efSPaul Mackerras struct kvm_memory_slot *memslot,
1117c35635efSPaul Mackerras unsigned long *map)
1118c35635efSPaul Mackerras {
1119c35635efSPaul Mackerras unsigned long gfn;
1120c35635efSPaul Mackerras
1121c35635efSPaul Mackerras if (!vpa->dirty || !vpa->pinned_addr)
1122c35635efSPaul Mackerras return;
1123c35635efSPaul Mackerras gfn = vpa->gpa >> PAGE_SHIFT;
1124c35635efSPaul Mackerras if (gfn < memslot->base_gfn ||
1125c35635efSPaul Mackerras gfn >= memslot->base_gfn + memslot->npages)
1126c35635efSPaul Mackerras return;
1127c35635efSPaul Mackerras
1128c35635efSPaul Mackerras vpa->dirty = false;
1129c35635efSPaul Mackerras if (map)
1130c35635efSPaul Mackerras __set_bit_le(gfn - memslot->base_gfn, map);
1131c35635efSPaul Mackerras }
1132c35635efSPaul Mackerras
11338f7b79b8SPaul Mackerras long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
11348f7b79b8SPaul Mackerras struct kvm_memory_slot *memslot, unsigned long *map)
113582ed3616SPaul Mackerras {
1136e641a317SPaul Mackerras unsigned long i;
1137dfe49dbdSPaul Mackerras unsigned long *rmapp;
113882ed3616SPaul Mackerras
113982ed3616SPaul Mackerras preempt_disable();
1140d89cc617STakuya Yoshikawa rmapp = memslot->arch.rmap;
114182ed3616SPaul Mackerras for (i = 0; i < memslot->npages; ++i) {
1142687414beSAlexey Kardashevskiy int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
1143687414beSAlexey Kardashevskiy /*
1144687414beSAlexey Kardashevskiy * Note that if npages > 0 then i must be a multiple of npages,
1145687414beSAlexey Kardashevskiy * since we always put huge-page HPTEs in the rmap chain
1146687414beSAlexey Kardashevskiy * corresponding to their page base address.
1147687414beSAlexey Kardashevskiy */
1148e641a317SPaul Mackerras if (npages)
1149e641a317SPaul Mackerras set_dirty_bits(map, i, npages);
115082ed3616SPaul Mackerras ++rmapp;
115182ed3616SPaul Mackerras }
115282ed3616SPaul Mackerras preempt_enable();
115382ed3616SPaul Mackerras return 0;
115482ed3616SPaul Mackerras }
115582ed3616SPaul Mackerras
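/*
 * Continuing the example above (illustrative values): the 16 MB HPTE
 * sits only in the rmap chain of its base gfn, so the loop sees
 * npages == 256 exactly once, at a slot index that is a multiple of
 * 256, and a single set_dirty_bits(map, i, 256) covers the huge page.
 */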
115693e60249SPaul Mackerras void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
115793e60249SPaul Mackerras unsigned long *nb_ret)
115893e60249SPaul Mackerras {
115993e60249SPaul Mackerras struct kvm_memory_slot *memslot;
116093e60249SPaul Mackerras unsigned long gfn = gpa >> PAGE_SHIFT;
1161342d3db7SPaul Mackerras struct page *page, *pages[1];
1162342d3db7SPaul Mackerras int npages;
1163c35635efSPaul Mackerras unsigned long hva, offset;
11642c9097e4SPaul Mackerras int srcu_idx;
116593e60249SPaul Mackerras
11662c9097e4SPaul Mackerras srcu_idx = srcu_read_lock(&kvm->srcu);
116793e60249SPaul Mackerras memslot = gfn_to_memslot(kvm, gfn);
116893e60249SPaul Mackerras if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
11692c9097e4SPaul Mackerras goto err;
1170342d3db7SPaul Mackerras hva = gfn_to_hva_memslot(memslot, gfn);
117173b0140bSIra Weiny npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
1172342d3db7SPaul Mackerras if (npages < 1)
11732c9097e4SPaul Mackerras goto err;
1174342d3db7SPaul Mackerras page = pages[0];
11752c9097e4SPaul Mackerras srcu_read_unlock(&kvm->srcu, srcu_idx);
11762c9097e4SPaul Mackerras
1177c35635efSPaul Mackerras offset = gpa & (PAGE_SIZE - 1);
117893e60249SPaul Mackerras if (nb_ret)
1179c35635efSPaul Mackerras *nb_ret = PAGE_SIZE - offset;
118093e60249SPaul Mackerras return page_address(page) + offset;
11812c9097e4SPaul Mackerras
11822c9097e4SPaul Mackerras err:
11832c9097e4SPaul Mackerras srcu_read_unlock(&kvm->srcu, srcu_idx);
11842c9097e4SPaul Mackerras return NULL;
118593e60249SPaul Mackerras }
118693e60249SPaul Mackerras
1187c35635efSPaul Mackerras void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
1188c35635efSPaul Mackerras bool dirty)
118993e60249SPaul Mackerras {
119093e60249SPaul Mackerras struct page *page = virt_to_page(va);
1191c35635efSPaul Mackerras struct kvm_memory_slot *memslot;
1192c35635efSPaul Mackerras unsigned long gfn;
1193c35635efSPaul Mackerras int srcu_idx;
119493e60249SPaul Mackerras
119593e60249SPaul Mackerras put_page(page);
1196c35635efSPaul Mackerras
1197c17b98cfSPaul Mackerras if (!dirty)
1198c35635efSPaul Mackerras return;
1199c35635efSPaul Mackerras
1200e641a317SPaul Mackerras /* We need to mark this page dirty in the memslot dirty_bitmap, if any */
1201c35635efSPaul Mackerras gfn = gpa >> PAGE_SHIFT;
1202c35635efSPaul Mackerras srcu_idx = srcu_read_lock(&kvm->srcu);
1203c35635efSPaul Mackerras memslot = gfn_to_memslot(kvm, gfn);
1204e641a317SPaul Mackerras if (memslot && memslot->dirty_bitmap)
1205e641a317SPaul Mackerras set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
1206c35635efSPaul Mackerras srcu_read_unlock(&kvm->srcu, srcu_idx);
120793e60249SPaul Mackerras }
120893e60249SPaul Mackerras
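/*
 * Typical pairing of the two helpers above (a hedged sketch; gpa and
 * the memset are illustrative only). The caller pins the page, uses
 * at most *nb_ret bytes of the returned host mapping, then unpins it,
 * passing dirty = true if the mapping was written:
 *
 *	unsigned long nb;
 *	void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *
 *	if (va) {
 *		memset(va, 0, nb);	// write up to nb bytes
 *		kvmppc_unpin_guest_page(kvm, va, gpa, true);
 *	}
 */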
1209a2932923SPaul Mackerras /*
12105e985969SDavid Gibson * HPT resizing
12115e985969SDavid Gibson */
12125e985969SDavid Gibson static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
12135e985969SDavid Gibson {
1214b5baa687SDavid Gibson int rc;
1215b5baa687SDavid Gibson
1216b5baa687SDavid Gibson rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
1217b5baa687SDavid Gibson if (rc < 0)
1218b5baa687SDavid Gibson return rc;
1219b5baa687SDavid Gibson
122061119786SXueBing Chen resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__,
1221b5baa687SDavid Gibson resize->hpt.virt);
1222b5baa687SDavid Gibson
12235e985969SDavid Gibson return 0;
12245e985969SDavid Gibson }
12255e985969SDavid Gibson
1226b5baa687SDavid Gibson static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
1227b5baa687SDavid Gibson unsigned long idx)
1228b5baa687SDavid Gibson {
1229b5baa687SDavid Gibson struct kvm *kvm = resize->kvm;
1230b5baa687SDavid Gibson struct kvm_hpt_info *old = &kvm->arch.hpt;
1231b5baa687SDavid Gibson struct kvm_hpt_info *new = &resize->hpt;
1232b5baa687SDavid Gibson unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
1233b5baa687SDavid Gibson unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
1234b5baa687SDavid Gibson __be64 *hptep, *new_hptep;
1235b5baa687SDavid Gibson unsigned long vpte, rpte, guest_rpte;
1236b5baa687SDavid Gibson int ret;
1237b5baa687SDavid Gibson struct revmap_entry *rev;
1238ded13fc1SPaul Mackerras unsigned long apsize, avpn, pteg, hash;
1239b5baa687SDavid Gibson unsigned long new_idx, new_pteg, replace_vpte;
1240ded13fc1SPaul Mackerras int pshift;
1241b5baa687SDavid Gibson
1242b5baa687SDavid Gibson hptep = (__be64 *)(old->virt + (idx << 4));
1243b5baa687SDavid Gibson
1244b5baa687SDavid Gibson /* Guest is stopped, so new HPTEs can't be added or faulted
1245b5baa687SDavid Gibson * in, only unmapped or altered by host actions. So, it's
1246b5baa687SDavid Gibson * safe to check this before we take the HPTE lock */
1247b5baa687SDavid Gibson vpte = be64_to_cpu(hptep[0]);
1248b5baa687SDavid Gibson if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
1249b5baa687SDavid Gibson return 0; /* nothing to do */
1250b5baa687SDavid Gibson
1251b5baa687SDavid Gibson while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
1252b5baa687SDavid Gibson cpu_relax();
1253b5baa687SDavid Gibson
1254b5baa687SDavid Gibson vpte = be64_to_cpu(hptep[0]);
1255b5baa687SDavid Gibson
1256b5baa687SDavid Gibson ret = 0;
1257b5baa687SDavid Gibson if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
1258b5baa687SDavid Gibson /* Nothing to do */
1259b5baa687SDavid Gibson goto out;
1260b5baa687SDavid Gibson
1261790a9df5SDavid Gibson if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1262790a9df5SDavid Gibson rpte = be64_to_cpu(hptep[1]);
1263790a9df5SDavid Gibson vpte = hpte_new_to_old_v(vpte, rpte);
1264790a9df5SDavid Gibson }
1265790a9df5SDavid Gibson
1266b5baa687SDavid Gibson /* Unmap */
1267b5baa687SDavid Gibson rev = &old->rev[idx];
1268b5baa687SDavid Gibson guest_rpte = rev->guest_rpte;
1269b5baa687SDavid Gibson
1270b5baa687SDavid Gibson ret = -EIO;
12718dc6cca5SPaul Mackerras apsize = kvmppc_actual_pgsz(vpte, guest_rpte);
1272b5baa687SDavid Gibson if (!apsize)
1273b5baa687SDavid Gibson goto out;
1274b5baa687SDavid Gibson
1275b5baa687SDavid Gibson if (vpte & HPTE_V_VALID) {
1276b5baa687SDavid Gibson unsigned long gfn = hpte_rpn(guest_rpte, apsize);
1277b5baa687SDavid Gibson int srcu_idx = srcu_read_lock(&kvm->srcu);
1278b5baa687SDavid Gibson struct kvm_memory_slot *memslot =
1279b5baa687SDavid Gibson __gfn_to_memslot(kvm_memslots(kvm), gfn);
1280b5baa687SDavid Gibson
1281b5baa687SDavid Gibson if (memslot) {
1282b5baa687SDavid Gibson unsigned long *rmapp;
1283b5baa687SDavid Gibson rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1284b5baa687SDavid Gibson
1285b5baa687SDavid Gibson lock_rmap(rmapp);
1286e641a317SPaul Mackerras kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
1287b5baa687SDavid Gibson unlock_rmap(rmapp);
1288b5baa687SDavid Gibson }
1289b5baa687SDavid Gibson
1290b5baa687SDavid Gibson srcu_read_unlock(&kvm->srcu, srcu_idx);
1291b5baa687SDavid Gibson }
1292b5baa687SDavid Gibson
1293b5baa687SDavid Gibson /* Reload PTE after unmap */
1294b5baa687SDavid Gibson vpte = be64_to_cpu(hptep[0]);
1295b5baa687SDavid Gibson BUG_ON(vpte & HPTE_V_VALID);
1296b5baa687SDavid Gibson BUG_ON(!(vpte & HPTE_V_ABSENT));
1297b5baa687SDavid Gibson
1298b5baa687SDavid Gibson ret = 0;
1299b5baa687SDavid Gibson if (!(vpte & HPTE_V_BOLTED))
1300b5baa687SDavid Gibson goto out;
1301b5baa687SDavid Gibson
1302b5baa687SDavid Gibson rpte = be64_to_cpu(hptep[1]);
1303790a9df5SDavid Gibson
1304790a9df5SDavid Gibson if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1305790a9df5SDavid Gibson vpte = hpte_new_to_old_v(vpte, rpte);
1306790a9df5SDavid Gibson rpte = hpte_new_to_old_r(rpte);
1307790a9df5SDavid Gibson }
1308790a9df5SDavid Gibson
1309ded13fc1SPaul Mackerras pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
1310ded13fc1SPaul Mackerras avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
1311b5baa687SDavid Gibson pteg = idx / HPTES_PER_GROUP;
1312b5baa687SDavid Gibson if (vpte & HPTE_V_SECONDARY)
1313b5baa687SDavid Gibson pteg = ~pteg;
1314b5baa687SDavid Gibson
1315b5baa687SDavid Gibson if (!(vpte & HPTE_V_1TB_SEG)) {
1316b5baa687SDavid Gibson unsigned long offset, vsid;
1317b5baa687SDavid Gibson
1318b5baa687SDavid Gibson /* We only have 28 - 23 bits of offset in avpn */
1319b5baa687SDavid Gibson offset = (avpn & 0x1f) << 23;
1320b5baa687SDavid Gibson vsid = avpn >> 5;
1321b5baa687SDavid Gibson /* We can find more bits from the pteg value */
1322ded13fc1SPaul Mackerras if (pshift < 23)
1323ded13fc1SPaul Mackerras offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
1324b5baa687SDavid Gibson
1325ded13fc1SPaul Mackerras hash = vsid ^ (offset >> pshift);
1326b5baa687SDavid Gibson } else {
1327b5baa687SDavid Gibson unsigned long offset, vsid;
1328b5baa687SDavid Gibson
1329b5baa687SDavid Gibson /* We only have 40 - 23 bits of seg_off in avpn */
1330b5baa687SDavid Gibson offset = (avpn & 0x1ffff) << 23;
1331b5baa687SDavid Gibson vsid = avpn >> 17;
1332ded13fc1SPaul Mackerras if (pshift < 23)
1333ded13fc1SPaul Mackerras offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
1334b5baa687SDavid Gibson
1335ded13fc1SPaul Mackerras hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
1336b5baa687SDavid Gibson }
1337b5baa687SDavid Gibson
1338b5baa687SDavid Gibson new_pteg = hash & new_hash_mask;
133905f2bb03SPaul Mackerras if (vpte & HPTE_V_SECONDARY)
134005f2bb03SPaul Mackerras new_pteg = ~hash & new_hash_mask;
1341b5baa687SDavid Gibson
1342b5baa687SDavid Gibson new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
1343b5baa687SDavid Gibson new_hptep = (__be64 *)(new->virt + (new_idx << 4));
1344b5baa687SDavid Gibson
1345b5baa687SDavid Gibson replace_vpte = be64_to_cpu(new_hptep[0]);
1346790a9df5SDavid Gibson if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1347790a9df5SDavid Gibson unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
1348790a9df5SDavid Gibson replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
1349790a9df5SDavid Gibson }
1350b5baa687SDavid Gibson
1351b5baa687SDavid Gibson if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1352b5baa687SDavid Gibson BUG_ON(new->order >= old->order);
1353b5baa687SDavid Gibson
1354b5baa687SDavid Gibson if (replace_vpte & HPTE_V_BOLTED) {
1355b5baa687SDavid Gibson if (vpte & HPTE_V_BOLTED)
1356b5baa687SDavid Gibson /* Bolted collision, nothing we can do */
1357b5baa687SDavid Gibson ret = -ENOSPC;
1358b5baa687SDavid Gibson /* Discard the new HPTE */
1359b5baa687SDavid Gibson goto out;
1360b5baa687SDavid Gibson }
1361b5baa687SDavid Gibson
1362b5baa687SDavid Gibson /* Discard the previous HPTE */
1363b5baa687SDavid Gibson }
1364b5baa687SDavid Gibson
1365790a9df5SDavid Gibson if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1366790a9df5SDavid Gibson rpte = hpte_old_to_new_r(vpte, rpte);
1367790a9df5SDavid Gibson vpte = hpte_old_to_new_v(vpte);
1368790a9df5SDavid Gibson }
1369790a9df5SDavid Gibson
1370b5baa687SDavid Gibson new_hptep[1] = cpu_to_be64(rpte);
1371b5baa687SDavid Gibson new->rev[new_idx].guest_rpte = guest_rpte;
1372b5baa687SDavid Gibson /* No need for a barrier, since new HPT isn't active */
1373b5baa687SDavid Gibson new_hptep[0] = cpu_to_be64(vpte);
1374b5baa687SDavid Gibson unlock_hpte(new_hptep, vpte);
1375b5baa687SDavid Gibson
1376b5baa687SDavid Gibson out:
1377b5baa687SDavid Gibson unlock_hpte(hptep, vpte);
1378b5baa687SDavid Gibson return ret;
1379b5baa687SDavid Gibson }
1380b5baa687SDavid Gibson
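/*
 * Worked example for the rehash above (illustrative, with assumed
 * values): each PTEG is 128 bytes, so an HPT of order N holds
 * 2^(N - 7) PTEGs and its hash_mask is 2^(N - 7) - 1. For a 4K base
 * page (pshift = 12) in a 256 MB segment, the primary hash is
 *
 *	hash = vsid ^ (offset >> 12)
 *
 * Growing from order 18 to order 19 doubles the PTEG count, so one
 * more hash bit becomes significant under new_hash_mask and roughly
 * half of the entries land in a different PTEG of the new table.
 */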
13815e985969SDavid Gibson static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
13825e985969SDavid Gibson {
1383b5baa687SDavid Gibson struct kvm *kvm = resize->kvm;
1384b5baa687SDavid Gibson unsigned long i;
1385b5baa687SDavid Gibson int rc;
1386b5baa687SDavid Gibson
1387b5baa687SDavid Gibson for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
1388b5baa687SDavid Gibson rc = resize_hpt_rehash_hpte(resize, i);
1389b5baa687SDavid Gibson if (rc != 0)
1390b5baa687SDavid Gibson return rc;
1391b5baa687SDavid Gibson }
1392b5baa687SDavid Gibson
1393b5baa687SDavid Gibson return 0;
13945e985969SDavid Gibson }
13955e985969SDavid Gibson
13965e985969SDavid Gibson static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
13975e985969SDavid Gibson {
1398b5baa687SDavid Gibson struct kvm *kvm = resize->kvm;
1399b5baa687SDavid Gibson struct kvm_hpt_info hpt_tmp;
1400b5baa687SDavid Gibson
1401b5baa687SDavid Gibson /* Exchange the pending tables in the resize structure with
1402b5baa687SDavid Gibson * the active tables */
1403b5baa687SDavid Gibson
1404b5baa687SDavid Gibson resize_hpt_debug(resize, "resize_hpt_pivot()\n");
1405b5baa687SDavid Gibson
1406b5baa687SDavid Gibson spin_lock(&kvm->mmu_lock);
1407b5baa687SDavid Gibson asm volatile("ptesync" : : : "memory");
1408b5baa687SDavid Gibson
1409b5baa687SDavid Gibson hpt_tmp = kvm->arch.hpt;
1410b5baa687SDavid Gibson kvmppc_set_hpt(kvm, &resize->hpt);
1411b5baa687SDavid Gibson resize->hpt = hpt_tmp;
1412b5baa687SDavid Gibson
1413b5baa687SDavid Gibson spin_unlock(&kvm->mmu_lock);
1414b5baa687SDavid Gibson
1415b5baa687SDavid Gibson synchronize_srcu_expedited(&kvm->srcu);
1416b5baa687SDavid Gibson
1417790a9df5SDavid Gibson if (cpu_has_feature(CPU_FTR_ARCH_300))
1418790a9df5SDavid Gibson kvmppc_setup_partition_table(kvm);
1419790a9df5SDavid Gibson
1420b5baa687SDavid Gibson resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
14215e985969SDavid Gibson }
14225e985969SDavid Gibson
14235e985969SDavid Gibson static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
14245e985969SDavid Gibson {
14250d4ee88dSPaul Mackerras if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
14264ed11aeeSSerhii Popovych return;
1427b5baa687SDavid Gibson
14285b73d634SDavid Gibson if (!resize)
14295b73d634SDavid Gibson return;
14305b73d634SDavid Gibson
14314ed11aeeSSerhii Popovych if (resize->error != -EBUSY) {
1432b5baa687SDavid Gibson if (resize->hpt.virt)
1433b5baa687SDavid Gibson kvmppc_free_hpt(&resize->hpt);
14345e985969SDavid Gibson kfree(resize);
14355e985969SDavid Gibson }
14365e985969SDavid Gibson
14374ed11aeeSSerhii Popovych if (kvm->arch.resize_hpt == resize)
14384ed11aeeSSerhii Popovych kvm->arch.resize_hpt = NULL;
14394ed11aeeSSerhii Popovych }
14404ed11aeeSSerhii Popovych
14415e985969SDavid Gibson static void resize_hpt_prepare_work(struct work_struct *work)
14425e985969SDavid Gibson {
14435e985969SDavid Gibson struct kvm_resize_hpt *resize = container_of(work,
14445e985969SDavid Gibson struct kvm_resize_hpt,
14455e985969SDavid Gibson work);
14465e985969SDavid Gibson struct kvm *kvm = resize->kvm;
14474ed11aeeSSerhii Popovych int err = 0;
14485e985969SDavid Gibson
14493073774eSSerhii Popovych if (WARN_ON(resize->error != -EBUSY))
14503073774eSSerhii Popovych return;
14513073774eSSerhii Popovych
14520d4ee88dSPaul Mackerras mutex_lock(&kvm->arch.mmu_setup_lock);
14534ed11aeeSSerhii Popovych
14544ed11aeeSSerhii Popovych /* Request is still current? */
14554ed11aeeSSerhii Popovych if (kvm->arch.resize_hpt == resize) {
14564ed11aeeSSerhii Popovych /* We may request large allocations here, so we must not
14570d4ee88dSPaul Mackerras * sleep while holding kvm->arch.mmu_setup_lock for a long time.
14584ed11aeeSSerhii Popovych */
14590d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
14604ed11aeeSSerhii Popovych
146161119786SXueBing Chen resize_hpt_debug(resize, "%s(): order = %d\n", __func__,
14625e985969SDavid Gibson resize->order);
14635e985969SDavid Gibson
14645e985969SDavid Gibson err = resize_hpt_allocate(resize);
14655e985969SDavid Gibson
14663073774eSSerhii Popovych /* We rely strictly on -EBUSY meaning that the allocation
14673073774eSSerhii Popovych * is still in progress while preparing the HPT resize.
14683073774eSSerhii Popovych */
14693073774eSSerhii Popovych if (WARN_ON(err == -EBUSY))
14703073774eSSerhii Popovych err = -EINPROGRESS;
14713073774eSSerhii Popovych
14720d4ee88dSPaul Mackerras mutex_lock(&kvm->arch.mmu_setup_lock);
14734ed11aeeSSerhii Popovych /* It is possible that kvm->arch.resize_hpt != resize
14740d4ee88dSPaul Mackerras * after we grab kvm->arch.mmu_setup_lock again.
14754ed11aeeSSerhii Popovych */
14764ed11aeeSSerhii Popovych }
14775e985969SDavid Gibson
14785e985969SDavid Gibson resize->error = err;
14795e985969SDavid Gibson
14804ed11aeeSSerhii Popovych if (kvm->arch.resize_hpt != resize)
14814ed11aeeSSerhii Popovych resize_hpt_release(kvm, resize);
14824ed11aeeSSerhii Popovych
14830d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
14845e985969SDavid Gibson }
14855e985969SDavid Gibson
148667c48662SThomas Huth int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
14875e985969SDavid Gibson struct kvm_ppc_resize_hpt *rhpt)
14885e985969SDavid Gibson {
14895e985969SDavid Gibson unsigned long flags = rhpt->flags;
14905e985969SDavid Gibson unsigned long shift = rhpt->shift;
14915e985969SDavid Gibson struct kvm_resize_hpt *resize;
14925e985969SDavid Gibson int ret;
14935e985969SDavid Gibson
1494891f1ebfSPaul Mackerras if (flags != 0 || kvm_is_radix(kvm))
14955e985969SDavid Gibson return -EINVAL;
14965e985969SDavid Gibson
14975e985969SDavid Gibson if (shift && ((shift < 18) || (shift > 46)))
14985e985969SDavid Gibson return -EINVAL;
14995e985969SDavid Gibson
15000d4ee88dSPaul Mackerras mutex_lock(&kvm->arch.mmu_setup_lock);
15015e985969SDavid Gibson
15025e985969SDavid Gibson resize = kvm->arch.resize_hpt;
15035e985969SDavid Gibson
15045e985969SDavid Gibson if (resize) {
15055e985969SDavid Gibson if (resize->order == shift) {
15063073774eSSerhii Popovych /* Suitable resize in progress? */
15075e985969SDavid Gibson ret = resize->error;
15083073774eSSerhii Popovych if (ret == -EBUSY)
15095e985969SDavid Gibson ret = 100; /* estimated time in ms */
15103073774eSSerhii Popovych else if (ret)
15113073774eSSerhii Popovych resize_hpt_release(kvm, resize);
15125e985969SDavid Gibson
15135e985969SDavid Gibson goto out;
15145e985969SDavid Gibson }
15155e985969SDavid Gibson
15165e985969SDavid Gibson /* not suitable, cancel it */
15175e985969SDavid Gibson resize_hpt_release(kvm, resize);
15185e985969SDavid Gibson }
15195e985969SDavid Gibson
15205e985969SDavid Gibson ret = 0;
15215e985969SDavid Gibson if (!shift)
15225e985969SDavid Gibson goto out; /* nothing to do */
15235e985969SDavid Gibson
15245e985969SDavid Gibson /* start new resize */
15255e985969SDavid Gibson
15265e985969SDavid Gibson resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1527abd80dcbSDan Carpenter if (!resize) {
1528abd80dcbSDan Carpenter ret = -ENOMEM;
1529abd80dcbSDan Carpenter goto out;
1530abd80dcbSDan Carpenter }
15313073774eSSerhii Popovych
15323073774eSSerhii Popovych resize->error = -EBUSY;
15335e985969SDavid Gibson resize->order = shift;
15345e985969SDavid Gibson resize->kvm = kvm;
15355e985969SDavid Gibson INIT_WORK(&resize->work, resize_hpt_prepare_work);
15365e985969SDavid Gibson kvm->arch.resize_hpt = resize;
15375e985969SDavid Gibson
15385e985969SDavid Gibson schedule_work(&resize->work);
15395e985969SDavid Gibson
15405e985969SDavid Gibson ret = 100; /* estimated time in ms */
15415e985969SDavid Gibson
15425e985969SDavid Gibson out:
15430d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
15445e985969SDavid Gibson return ret;
15455e985969SDavid Gibson }
15465e985969SDavid Gibson
15475e985969SDavid Gibson static void resize_hpt_boot_vcpu(void *opaque)
15485e985969SDavid Gibson {
15495e985969SDavid Gibson /* Nothing to do, just force a KVM exit */
15505e985969SDavid Gibson }
15515e985969SDavid Gibson
155267c48662SThomas Huth int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
15535e985969SDavid Gibson struct kvm_ppc_resize_hpt *rhpt)
15545e985969SDavid Gibson {
15555e985969SDavid Gibson unsigned long flags = rhpt->flags;
15565e985969SDavid Gibson unsigned long shift = rhpt->shift;
15575e985969SDavid Gibson struct kvm_resize_hpt *resize;
155867c48662SThomas Huth int ret;
15595e985969SDavid Gibson
1560891f1ebfSPaul Mackerras if (flags != 0 || kvm_is_radix(kvm))
15615e985969SDavid Gibson return -EINVAL;
15625e985969SDavid Gibson
15635e985969SDavid Gibson if (shift && ((shift < 18) || (shift > 46)))
15645e985969SDavid Gibson return -EINVAL;
15655e985969SDavid Gibson
15660d4ee88dSPaul Mackerras mutex_lock(&kvm->arch.mmu_setup_lock);
15675e985969SDavid Gibson
15685e985969SDavid Gibson resize = kvm->arch.resize_hpt;
15695e985969SDavid Gibson
15705e985969SDavid Gibson /* This shouldn't be possible */
15715e985969SDavid Gibson ret = -EIO;
15721b151ce4SPaul Mackerras if (WARN_ON(!kvm->arch.mmu_ready))
15735e985969SDavid Gibson goto out_no_hpt;
15745e985969SDavid Gibson
15755e985969SDavid Gibson /* Stop VCPUs from running while we mess with the HPT */
15761b151ce4SPaul Mackerras kvm->arch.mmu_ready = 0;
15775e985969SDavid Gibson smp_mb();
15785e985969SDavid Gibson
15795e985969SDavid Gibson /* Boot all CPUs out of the guest so they re-read
15801b151ce4SPaul Mackerras * mmu_ready */
15815e985969SDavid Gibson on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);
15825e985969SDavid Gibson
15835e985969SDavid Gibson ret = -ENXIO;
15845e985969SDavid Gibson if (!resize || (resize->order != shift))
15855e985969SDavid Gibson goto out;
15865e985969SDavid Gibson
15875e985969SDavid Gibson ret = resize->error;
15883073774eSSerhii Popovych if (ret)
15895e985969SDavid Gibson goto out;
15905e985969SDavid Gibson
15915e985969SDavid Gibson ret = resize_hpt_rehash(resize);
15923073774eSSerhii Popovych if (ret)
15935e985969SDavid Gibson goto out;
15945e985969SDavid Gibson
15955e985969SDavid Gibson resize_hpt_pivot(resize);
15965e985969SDavid Gibson
15975e985969SDavid Gibson out:
15985e985969SDavid Gibson /* Let VCPUs run again */
15991b151ce4SPaul Mackerras kvm->arch.mmu_ready = 1;
16005e985969SDavid Gibson smp_mb();
16015e985969SDavid Gibson out_no_hpt:
16025e985969SDavid Gibson resize_hpt_release(kvm, resize);
16030d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
16045e985969SDavid Gibson return ret;
16055e985969SDavid Gibson }
16065e985969SDavid Gibson
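/*
 * Userspace view of the two-phase resize implemented above (a hedged
 * sketch, not part of this file; vm_fd is an assumed open VM fd).
 * KVM_PPC_RESIZE_HPT_PREPARE returns a positive time estimate in
 * milliseconds while the worker allocates the new HPT, 0 once it is
 * ready, and a negative errno on failure; KVM_PPC_RESIZE_HPT_COMMIT
 * then rehashes into and pivots to the new table:
 *
 *	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = 30 };
 *	int ret;
 *
 *	do {
 *		ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
 *		if (ret > 0)
 *			usleep(ret * 1000);	// retry after the estimate
 *	} while (ret > 0);
 *	if (ret == 0)
 *		ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
 */
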
16075e985969SDavid Gibson /*
1608a2932923SPaul Mackerras * Functions for reading and writing the hash table via reads and
1609a2932923SPaul Mackerras * writes on a file descriptor.
1610a2932923SPaul Mackerras *
1611a2932923SPaul Mackerras * Reads return the guest view of the hash table, which has to be
1612a2932923SPaul Mackerras * pieced together from the real hash table and the guest_rpte
1613a2932923SPaul Mackerras * values in the revmap array.
1614a2932923SPaul Mackerras *
1615a2932923SPaul Mackerras * On writes, each HPTE written is considered in turn, and if it
1616a2932923SPaul Mackerras * is valid, it is written to the HPT as if an H_ENTER with the
1617a2932923SPaul Mackerras * H_EXACT flag set had been done. When the invalid count in the
1618a2932923SPaul Mackerras * header written to the stream is non-zero, the kernel ensures
1619a2932923SPaul Mackerras * that the stated number of HPTEs are invalid, invalidating
1620a2932923SPaul Mackerras * any that are not.
1621a2932923SPaul Mackerras */
1622a2932923SPaul Mackerras
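/*
 * Concretely, the stream is a sequence of blocks (a sketch of the
 * layout using the uapi structures; see kvm_htab_read() and
 * kvm_htab_write() below):
 *
 *	struct kvm_get_htab_header hdr;	// __u32 index; __u16 n_valid; __u16 n_invalid;
 *	__u64 hpte[2 * hdr.n_valid];	// first and second doublewords of each HPTE
 *
 * followed implicitly by hdr.n_invalid invalid entries starting at
 * index + n_valid, for which no HPTE data is transferred.
 */
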
1623a2932923SPaul Mackerras struct kvm_htab_ctx {
1624a2932923SPaul Mackerras unsigned long index;
1625a2932923SPaul Mackerras unsigned long flags;
1626a2932923SPaul Mackerras struct kvm *kvm;
1627a2932923SPaul Mackerras int first_pass;
1628a2932923SPaul Mackerras };
1629a2932923SPaul Mackerras
1630a2932923SPaul Mackerras #define HPTE_SIZE (2 * sizeof(unsigned long))
1631a2932923SPaul Mackerras
1632a1b4a0f6SPaul Mackerras /*
1633a1b4a0f6SPaul Mackerras * Returns 1 if this HPT entry has been modified or has pending
1634a1b4a0f6SPaul Mackerras * R/C bit changes.
1635a1b4a0f6SPaul Mackerras */
16366f22bd32SAlexander Graf static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
1637a1b4a0f6SPaul Mackerras {
1638a1b4a0f6SPaul Mackerras unsigned long rcbits_unset;
1639a1b4a0f6SPaul Mackerras
1640a1b4a0f6SPaul Mackerras if (revp->guest_rpte & HPTE_GR_MODIFIED)
1641a1b4a0f6SPaul Mackerras return 1;
1642a1b4a0f6SPaul Mackerras
1643a1b4a0f6SPaul Mackerras /* Also need to consider changes in reference and changed bits */
1644a1b4a0f6SPaul Mackerras rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
16456f22bd32SAlexander Graf if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
16466f22bd32SAlexander Graf (be64_to_cpu(hptp[1]) & rcbits_unset))
1647a1b4a0f6SPaul Mackerras return 1;
1648a1b4a0f6SPaul Mackerras
1649a1b4a0f6SPaul Mackerras return 0;
1650a1b4a0f6SPaul Mackerras }
1651a1b4a0f6SPaul Mackerras
16526f22bd32SAlexander Graf static long record_hpte(unsigned long flags, __be64 *hptp,
1653a2932923SPaul Mackerras unsigned long *hpte, struct revmap_entry *revp,
1654a2932923SPaul Mackerras int want_valid, int first_pass)
1655a2932923SPaul Mackerras {
1656abb7c7ddSPaul Mackerras unsigned long v, r, hr;
1657a1b4a0f6SPaul Mackerras unsigned long rcbits_unset;
1658a2932923SPaul Mackerras int ok = 1;
1659a2932923SPaul Mackerras int valid, dirty;
1660a2932923SPaul Mackerras
1661a2932923SPaul Mackerras /* Unmodified entries are uninteresting except on the first pass */
1662a1b4a0f6SPaul Mackerras dirty = hpte_dirty(revp, hptp);
1663a2932923SPaul Mackerras if (!first_pass && !dirty)
1664a2932923SPaul Mackerras return 0;
1665a2932923SPaul Mackerras
1666a2932923SPaul Mackerras valid = 0;
16676f22bd32SAlexander Graf if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
1668a2932923SPaul Mackerras valid = 1;
1669a2932923SPaul Mackerras if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
16706f22bd32SAlexander Graf !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
1671a2932923SPaul Mackerras valid = 0;
1672a2932923SPaul Mackerras }
1673a2932923SPaul Mackerras if (valid != want_valid)
1674a2932923SPaul Mackerras return 0;
1675a2932923SPaul Mackerras
1676a2932923SPaul Mackerras v = r = 0;
1677a2932923SPaul Mackerras if (valid || dirty) {
1678a2932923SPaul Mackerras /* lock the HPTE so it's stable and read it */
1679a2932923SPaul Mackerras preempt_disable();
1680a2932923SPaul Mackerras while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
1681a2932923SPaul Mackerras cpu_relax();
16826f22bd32SAlexander Graf v = be64_to_cpu(hptp[0]);
1683abb7c7ddSPaul Mackerras hr = be64_to_cpu(hptp[1]);
1684abb7c7ddSPaul Mackerras if (cpu_has_feature(CPU_FTR_ARCH_300)) {
1685abb7c7ddSPaul Mackerras v = hpte_new_to_old_v(v, hr);
1686abb7c7ddSPaul Mackerras hr = hpte_new_to_old_r(hr);
1687abb7c7ddSPaul Mackerras }
1688a1b4a0f6SPaul Mackerras
1689a1b4a0f6SPaul Mackerras /* re-evaluate valid and dirty from synchronized HPTE value */
1690a1b4a0f6SPaul Mackerras valid = !!(v & HPTE_V_VALID);
1691a1b4a0f6SPaul Mackerras dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);
1692a1b4a0f6SPaul Mackerras
1693a1b4a0f6SPaul Mackerras /* Harvest R and C into guest view if necessary */
1694a1b4a0f6SPaul Mackerras rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
1695abb7c7ddSPaul Mackerras if (valid && (rcbits_unset & hr)) {
1696abb7c7ddSPaul Mackerras revp->guest_rpte |= (hr &
16976f22bd32SAlexander Graf (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
1698a1b4a0f6SPaul Mackerras dirty = 1;
1699a1b4a0f6SPaul Mackerras }
1700a1b4a0f6SPaul Mackerras
1701a2932923SPaul Mackerras if (v & HPTE_V_ABSENT) {
1702a2932923SPaul Mackerras v &= ~HPTE_V_ABSENT;
1703a2932923SPaul Mackerras v |= HPTE_V_VALID;
1704a1b4a0f6SPaul Mackerras valid = 1;
1705a2932923SPaul Mackerras }
1706a2932923SPaul Mackerras if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
1707a2932923SPaul Mackerras valid = 0;
1708a1b4a0f6SPaul Mackerras
1709a1b4a0f6SPaul Mackerras r = revp->guest_rpte;
1710a2932923SPaul Mackerras /* only clear modified if this is the right sort of entry */
1711a2932923SPaul Mackerras if (valid == want_valid && dirty) {
1712a2932923SPaul Mackerras r &= ~HPTE_GR_MODIFIED;
1713a2932923SPaul Mackerras revp->guest_rpte = r;
1714a2932923SPaul Mackerras }
1715a4bd6eb0SAneesh Kumar K.V unlock_hpte(hptp, be64_to_cpu(hptp[0]));
1716a2932923SPaul Mackerras preempt_enable();
1717a2932923SPaul Mackerras if (!(valid == want_valid && (first_pass || dirty)))
1718a2932923SPaul Mackerras ok = 0;
1719a2932923SPaul Mackerras }
17206f22bd32SAlexander Graf hpte[0] = cpu_to_be64(v);
17216f22bd32SAlexander Graf hpte[1] = cpu_to_be64(r);
1722a2932923SPaul Mackerras return ok;
1723a2932923SPaul Mackerras }
1724a2932923SPaul Mackerras
1725a2932923SPaul Mackerras static ssize_t kvm_htab_read(struct file *file, char __user *buf,
1726a2932923SPaul Mackerras size_t count, loff_t *ppos)
1727a2932923SPaul Mackerras {
1728a2932923SPaul Mackerras struct kvm_htab_ctx *ctx = file->private_data;
1729a2932923SPaul Mackerras struct kvm *kvm = ctx->kvm;
1730a2932923SPaul Mackerras struct kvm_get_htab_header hdr;
17316f22bd32SAlexander Graf __be64 *hptp;
1732a2932923SPaul Mackerras struct revmap_entry *revp;
1733a2932923SPaul Mackerras unsigned long i, nb, nw;
1734a2932923SPaul Mackerras unsigned long __user *lbuf;
1735a2932923SPaul Mackerras struct kvm_get_htab_header __user *hptr;
1736a2932923SPaul Mackerras unsigned long flags;
1737a2932923SPaul Mackerras int first_pass;
1738a2932923SPaul Mackerras unsigned long hpte[2];
1739a2932923SPaul Mackerras
174096d4f267SLinus Torvalds if (!access_ok(buf, count))
1741a2932923SPaul Mackerras return -EFAULT;
1742891f1ebfSPaul Mackerras if (kvm_is_radix(kvm))
1743891f1ebfSPaul Mackerras return 0;
1744a2932923SPaul Mackerras
1745a2932923SPaul Mackerras first_pass = ctx->first_pass;
1746a2932923SPaul Mackerras flags = ctx->flags;
1747a2932923SPaul Mackerras
1748a2932923SPaul Mackerras i = ctx->index;
17493f9d4f5aSDavid Gibson hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
17503f9d4f5aSDavid Gibson revp = kvm->arch.hpt.rev + i;
1751a2932923SPaul Mackerras lbuf = (unsigned long __user *)buf;
1752a2932923SPaul Mackerras
1753a2932923SPaul Mackerras nb = 0;
1754a2932923SPaul Mackerras while (nb + sizeof(hdr) + HPTE_SIZE < count) {
1755a2932923SPaul Mackerras /* Initialize header */
1756a2932923SPaul Mackerras hptr = (struct kvm_get_htab_header __user *)buf;
1757a2932923SPaul Mackerras hdr.n_valid = 0;
1758a2932923SPaul Mackerras hdr.n_invalid = 0;
1759a2932923SPaul Mackerras nw = nb;
1760a2932923SPaul Mackerras nb += sizeof(hdr);
1761a2932923SPaul Mackerras lbuf = (unsigned long __user *)(buf + sizeof(hdr));
1762a2932923SPaul Mackerras
1763a2932923SPaul Mackerras /* Skip uninteresting entries, i.e. clean ones on passes after the first */
1764a2932923SPaul Mackerras if (!first_pass) {
17653d089f84SDavid Gibson while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1766a1b4a0f6SPaul Mackerras !hpte_dirty(revp, hptp)) {
1767a2932923SPaul Mackerras ++i;
1768a2932923SPaul Mackerras hptp += 2;
1769a2932923SPaul Mackerras ++revp;
1770a2932923SPaul Mackerras }
1771a2932923SPaul Mackerras }
177205dd85f7SPaul Mackerras hdr.index = i;
1773a2932923SPaul Mackerras
1774a2932923SPaul Mackerras /* Grab a series of valid entries */
17753d089f84SDavid Gibson while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1776a2932923SPaul Mackerras hdr.n_valid < 0xffff &&
1777a2932923SPaul Mackerras nb + HPTE_SIZE < count &&
1778a2932923SPaul Mackerras record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
1779a2932923SPaul Mackerras /* valid entry, write it out */
1780a2932923SPaul Mackerras ++hdr.n_valid;
1781a2932923SPaul Mackerras if (__put_user(hpte[0], lbuf) ||
1782a2932923SPaul Mackerras __put_user(hpte[1], lbuf + 1))
1783a2932923SPaul Mackerras return -EFAULT;
1784a2932923SPaul Mackerras nb += HPTE_SIZE;
1785a2932923SPaul Mackerras lbuf += 2;
1786a2932923SPaul Mackerras ++i;
1787a2932923SPaul Mackerras hptp += 2;
1788a2932923SPaul Mackerras ++revp;
1789a2932923SPaul Mackerras }
1790a2932923SPaul Mackerras /* Now skip invalid entries while we can */
17913d089f84SDavid Gibson while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
1792a2932923SPaul Mackerras hdr.n_invalid < 0xffff &&
1793a2932923SPaul Mackerras record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
1794a2932923SPaul Mackerras /* found an invalid entry */
1795a2932923SPaul Mackerras ++hdr.n_invalid;
1796a2932923SPaul Mackerras ++i;
1797a2932923SPaul Mackerras hptp += 2;
1798a2932923SPaul Mackerras ++revp;
1799a2932923SPaul Mackerras }
1800a2932923SPaul Mackerras
1801a2932923SPaul Mackerras if (hdr.n_valid || hdr.n_invalid) {
1802a2932923SPaul Mackerras /* write back the header */
1803a2932923SPaul Mackerras if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
1804a2932923SPaul Mackerras return -EFAULT;
1805a2932923SPaul Mackerras nw = nb;
1806a2932923SPaul Mackerras buf = (char __user *)lbuf;
1807a2932923SPaul Mackerras } else {
1808a2932923SPaul Mackerras nb = nw;
1809a2932923SPaul Mackerras }
1810a2932923SPaul Mackerras
1811a2932923SPaul Mackerras /* Check if we've wrapped around the hash table */
18123d089f84SDavid Gibson if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
1813a2932923SPaul Mackerras i = 0;
1814a2932923SPaul Mackerras ctx->first_pass = 0;
1815a2932923SPaul Mackerras break;
1816a2932923SPaul Mackerras }
1817a2932923SPaul Mackerras }
1818a2932923SPaul Mackerras
1819a2932923SPaul Mackerras ctx->index = i;
1820a2932923SPaul Mackerras
1821a2932923SPaul Mackerras return nb;
1822a2932923SPaul Mackerras }
1823a2932923SPaul Mackerras
1824a2932923SPaul Mackerras static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1825a2932923SPaul Mackerras size_t count, loff_t *ppos)
1826a2932923SPaul Mackerras {
1827a2932923SPaul Mackerras struct kvm_htab_ctx *ctx = file->private_data;
1828a2932923SPaul Mackerras struct kvm *kvm = ctx->kvm;
1829a2932923SPaul Mackerras struct kvm_get_htab_header hdr;
1830a2932923SPaul Mackerras unsigned long i, j;
1831a2932923SPaul Mackerras unsigned long v, r;
1832a2932923SPaul Mackerras unsigned long __user *lbuf;
18336f22bd32SAlexander Graf __be64 *hptp;
1834a2932923SPaul Mackerras unsigned long tmp[2];
1835a2932923SPaul Mackerras ssize_t nb;
1836a2932923SPaul Mackerras long int err, ret;
18371b151ce4SPaul Mackerras int mmu_ready;
1838ded13fc1SPaul Mackerras int pshift;
1839a2932923SPaul Mackerras
184096d4f267SLinus Torvalds if (!access_ok(buf, count))
1841a2932923SPaul Mackerras return -EFAULT;
1842891f1ebfSPaul Mackerras if (kvm_is_radix(kvm))
1843891f1ebfSPaul Mackerras return -EINVAL;
1844a2932923SPaul Mackerras
1845a2932923SPaul Mackerras /* lock out vcpus from running while we're doing this */
18460d4ee88dSPaul Mackerras mutex_lock(&kvm->arch.mmu_setup_lock);
18471b151ce4SPaul Mackerras mmu_ready = kvm->arch.mmu_ready;
18481b151ce4SPaul Mackerras if (mmu_ready) {
18491b151ce4SPaul Mackerras kvm->arch.mmu_ready = 0; /* temporarily */
18501b151ce4SPaul Mackerras /* order mmu_ready vs. vcpus_running */
1851a2932923SPaul Mackerras smp_mb();
1852a2932923SPaul Mackerras if (atomic_read(&kvm->arch.vcpus_running)) {
18531b151ce4SPaul Mackerras kvm->arch.mmu_ready = 1;
18540d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
1855a2932923SPaul Mackerras return -EBUSY;
1856a2932923SPaul Mackerras }
1857a2932923SPaul Mackerras }
1858a2932923SPaul Mackerras
1859a2932923SPaul Mackerras err = 0;
1860a2932923SPaul Mackerras for (nb = 0; nb + sizeof(hdr) <= count; ) {
1861a2932923SPaul Mackerras err = -EFAULT;
1862a2932923SPaul Mackerras if (__copy_from_user(&hdr, buf, sizeof(hdr)))
1863a2932923SPaul Mackerras break;
1864a2932923SPaul Mackerras
1865a2932923SPaul Mackerras err = 0;
1866a2932923SPaul Mackerras if (nb + hdr.n_valid * HPTE_SIZE > count)
1867a2932923SPaul Mackerras break;
1868a2932923SPaul Mackerras
1869a2932923SPaul Mackerras nb += sizeof(hdr);
1870a2932923SPaul Mackerras buf += sizeof(hdr);
1871a2932923SPaul Mackerras
1872a2932923SPaul Mackerras err = -EINVAL;
1873a2932923SPaul Mackerras i = hdr.index;
18743d089f84SDavid Gibson if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
18753d089f84SDavid Gibson i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
1876a2932923SPaul Mackerras break;
1877a2932923SPaul Mackerras
18783f9d4f5aSDavid Gibson hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
1879a2932923SPaul Mackerras lbuf = (unsigned long __user *)buf;
1880a2932923SPaul Mackerras for (j = 0; j < hdr.n_valid; ++j) {
1881ffada016SCédric Le Goater __be64 hpte_v;
1882ffada016SCédric Le Goater __be64 hpte_r;
1883ffada016SCédric Le Goater
1884a2932923SPaul Mackerras err = -EFAULT;
1885ffada016SCédric Le Goater if (__get_user(hpte_v, lbuf) ||
1886ffada016SCédric Le Goater __get_user(hpte_r, lbuf + 1))
1887a2932923SPaul Mackerras goto out;
1888ffada016SCédric Le Goater v = be64_to_cpu(hpte_v);
1889ffada016SCédric Le Goater r = be64_to_cpu(hpte_r);
1890a2932923SPaul Mackerras err = -EINVAL;
1891a2932923SPaul Mackerras if (!(v & HPTE_V_VALID))
1892a2932923SPaul Mackerras goto out;
1893ded13fc1SPaul Mackerras pshift = kvmppc_hpte_base_page_shift(v, r);
1894ded13fc1SPaul Mackerras if (pshift <= 0)
1895ded13fc1SPaul Mackerras goto out;
1896a2932923SPaul Mackerras lbuf += 2;
1897a2932923SPaul Mackerras nb += HPTE_SIZE;
1898a2932923SPaul Mackerras
18996f22bd32SAlexander Graf if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1900a2932923SPaul Mackerras kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1901a2932923SPaul Mackerras err = -EIO;
1902a2932923SPaul Mackerras ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
1903a2932923SPaul Mackerras tmp);
1904a2932923SPaul Mackerras if (ret != H_SUCCESS) {
190561119786SXueBing Chen pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
1906a2932923SPaul Mackerras goto out;
1907a2932923SPaul Mackerras }
19081b151ce4SPaul Mackerras if (!mmu_ready && is_vrma_hpte(v)) {
1909ded13fc1SPaul Mackerras unsigned long senc, lpcr;
1910a2932923SPaul Mackerras
1911ded13fc1SPaul Mackerras senc = slb_pgsize_encoding(1ul << pshift);
1912a2932923SPaul Mackerras kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1913a2932923SPaul Mackerras (VRMA_VSID << SLB_VSID_SHIFT_1T);
1914ded13fc1SPaul Mackerras if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
1915a0144e2aSPaul Mackerras lpcr = senc << (LPCR_VRMASD_SH - 4);
1916ded13fc1SPaul Mackerras kvmppc_update_lpcr(kvm, lpcr,
1917ded13fc1SPaul Mackerras LPCR_VRMASD);
1918ded13fc1SPaul Mackerras } else {
1919ded13fc1SPaul Mackerras kvmppc_setup_partition_table(kvm);
1920ded13fc1SPaul Mackerras }
19211b151ce4SPaul Mackerras mmu_ready = 1;
1922a2932923SPaul Mackerras }
1923a2932923SPaul Mackerras ++i;
1924a2932923SPaul Mackerras hptp += 2;
1925a2932923SPaul Mackerras }
1926a2932923SPaul Mackerras
1927a2932923SPaul Mackerras for (j = 0; j < hdr.n_invalid; ++j) {
19286f22bd32SAlexander Graf if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
1929a2932923SPaul Mackerras kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
1930a2932923SPaul Mackerras ++i;
1931a2932923SPaul Mackerras hptp += 2;
1932a2932923SPaul Mackerras }
1933a2932923SPaul Mackerras err = 0;
1934a2932923SPaul Mackerras }
1935a2932923SPaul Mackerras
1936a2932923SPaul Mackerras out:
19371b151ce4SPaul Mackerras /* Order HPTE updates vs. mmu_ready */
1938a2932923SPaul Mackerras smp_wmb();
19391b151ce4SPaul Mackerras kvm->arch.mmu_ready = mmu_ready;
19400d4ee88dSPaul Mackerras mutex_unlock(&kvm->arch.mmu_setup_lock);
1941a2932923SPaul Mackerras
1942a2932923SPaul Mackerras if (err)
1943a2932923SPaul Mackerras return err;
1944a2932923SPaul Mackerras return nb;
1945a2932923SPaul Mackerras }
1946a2932923SPaul Mackerras
1947a2932923SPaul Mackerras static int kvm_htab_release(struct inode *inode, struct file *filp)
1948a2932923SPaul Mackerras {
1949a2932923SPaul Mackerras struct kvm_htab_ctx *ctx = filp->private_data;
1950a2932923SPaul Mackerras
1951a2932923SPaul Mackerras filp->private_data = NULL;
1952a2932923SPaul Mackerras if (!(ctx->flags & KVM_GET_HTAB_WRITE))
1953a2932923SPaul Mackerras atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
1954a2932923SPaul Mackerras kvm_put_kvm(ctx->kvm);
1955a2932923SPaul Mackerras kfree(ctx);
1956a2932923SPaul Mackerras return 0;
1957a2932923SPaul Mackerras }
1958a2932923SPaul Mackerras
195975ef9de1SAl Viro static const struct file_operations kvm_htab_fops = {
1960a2932923SPaul Mackerras .read = kvm_htab_read,
1961a2932923SPaul Mackerras .write = kvm_htab_write,
1962a2932923SPaul Mackerras .llseek = default_llseek,
1963a2932923SPaul Mackerras .release = kvm_htab_release,
1964a2932923SPaul Mackerras };
1965a2932923SPaul Mackerras
1966a2932923SPaul Mackerras int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
1967a2932923SPaul Mackerras {
1968a2932923SPaul Mackerras int ret;
1969a2932923SPaul Mackerras struct kvm_htab_ctx *ctx;
1970a2932923SPaul Mackerras int rwflag;
1971a2932923SPaul Mackerras
1972a2932923SPaul Mackerras /* reject flags we don't recognize */
1973a2932923SPaul Mackerras if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
1974a2932923SPaul Mackerras return -EINVAL;
1975a2932923SPaul Mackerras ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1976a2932923SPaul Mackerras if (!ctx)
1977a2932923SPaul Mackerras return -ENOMEM;
1978a2932923SPaul Mackerras kvm_get_kvm(kvm);
1979a2932923SPaul Mackerras ctx->kvm = kvm;
1980a2932923SPaul Mackerras ctx->index = ghf->start_index;
1981a2932923SPaul Mackerras ctx->flags = ghf->flags;
1982a2932923SPaul Mackerras ctx->first_pass = 1;
1983a2932923SPaul Mackerras
1984a2932923SPaul Mackerras rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
19852f84d5eaSYann Droneaud ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
1986a2932923SPaul Mackerras if (ret < 0) {
198743f6b0cfSnixiaoming kfree(ctx);
1988149487bdSSean Christopherson kvm_put_kvm_no_destroy(kvm);
1989a2932923SPaul Mackerras return ret;
1990a2932923SPaul Mackerras }
1991a2932923SPaul Mackerras
1992a2932923SPaul Mackerras if (rwflag == O_RDONLY) {
1993a2932923SPaul Mackerras mutex_lock(&kvm->slots_lock);
1994a2932923SPaul Mackerras atomic_inc(&kvm->arch.hpte_mod_interest);
1995a2932923SPaul Mackerras /* make sure kvmppc_do_h_enter etc. see the increment */
1996a2932923SPaul Mackerras synchronize_srcu_expedited(&kvm->srcu);
1997a2932923SPaul Mackerras mutex_unlock(&kvm->slots_lock);
1998a2932923SPaul Mackerras }
1999a2932923SPaul Mackerras
2000a2932923SPaul Mackerras return ret;
2001a2932923SPaul Mackerras }
2002a2932923SPaul Mackerras
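/*
 * Hedged userspace sketch of driving the fd returned by
 * kvm_vm_ioctl_get_htab_fd() above (vm_fd and the 16 KB buffer size
 * are assumptions; the structures come from <linux/kvm.h>). The first
 * read pass streams every valid entry; subsequent passes return only
 * entries dirtied since the previous pass, which is what live
 * migration relies on:
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *	char buf[16384];
 *	ssize_t n;
 *
 *	while (fd >= 0 && (n = read(fd, buf, sizeof(buf))) > 0)
 *		;	// parse kvm_get_htab_header blocks in buf
 */
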
2003e23a808bSPaul Mackerras struct debugfs_htab_state {
2004e23a808bSPaul Mackerras struct kvm *kvm;
2005e23a808bSPaul Mackerras struct mutex mutex;
2006e23a808bSPaul Mackerras unsigned long hpt_index;
2007e23a808bSPaul Mackerras int chars_left;
2008e23a808bSPaul Mackerras int buf_index;
2009e23a808bSPaul Mackerras char buf[64];
2010e23a808bSPaul Mackerras };
2011e23a808bSPaul Mackerras
2012e23a808bSPaul Mackerras static int debugfs_htab_open(struct inode *inode, struct file *file)
2013e23a808bSPaul Mackerras {
2014e23a808bSPaul Mackerras struct kvm *kvm = inode->i_private;
2015e23a808bSPaul Mackerras struct debugfs_htab_state *p;
2016e23a808bSPaul Mackerras
2017e23a808bSPaul Mackerras p = kzalloc(sizeof(*p), GFP_KERNEL);
2018e23a808bSPaul Mackerras if (!p)
2019e23a808bSPaul Mackerras return -ENOMEM;
2020e23a808bSPaul Mackerras
2021e23a808bSPaul Mackerras kvm_get_kvm(kvm);
2022e23a808bSPaul Mackerras p->kvm = kvm;
2023e23a808bSPaul Mackerras mutex_init(&p->mutex);
2024e23a808bSPaul Mackerras file->private_data = p;
2025e23a808bSPaul Mackerras
2026e23a808bSPaul Mackerras return nonseekable_open(inode, file);
2027e23a808bSPaul Mackerras }
2028e23a808bSPaul Mackerras
2029e23a808bSPaul Mackerras static int debugfs_htab_release(struct inode *inode, struct file *file)
2030e23a808bSPaul Mackerras {
2031e23a808bSPaul Mackerras struct debugfs_htab_state *p = file->private_data;
2032e23a808bSPaul Mackerras
2033e23a808bSPaul Mackerras kvm_put_kvm(p->kvm);
2034e23a808bSPaul Mackerras kfree(p);
2035e23a808bSPaul Mackerras return 0;
2036e23a808bSPaul Mackerras }
2037e23a808bSPaul Mackerras
2038e23a808bSPaul Mackerras static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
2039e23a808bSPaul Mackerras size_t len, loff_t *ppos)
2040e23a808bSPaul Mackerras {
2041e23a808bSPaul Mackerras struct debugfs_htab_state *p = file->private_data;
2042e23a808bSPaul Mackerras ssize_t ret, r;
2043e23a808bSPaul Mackerras unsigned long i, n;
2044e23a808bSPaul Mackerras unsigned long v, hr, gr;
2045e23a808bSPaul Mackerras struct kvm *kvm;
2046e23a808bSPaul Mackerras __be64 *hptp;
2047e23a808bSPaul Mackerras
2048891f1ebfSPaul Mackerras kvm = p->kvm;
2049891f1ebfSPaul Mackerras if (kvm_is_radix(kvm))
2050891f1ebfSPaul Mackerras return 0;
2051891f1ebfSPaul Mackerras
2052e23a808bSPaul Mackerras ret = mutex_lock_interruptible(&p->mutex);
2053e23a808bSPaul Mackerras if (ret)
2054e23a808bSPaul Mackerras return ret;
2055e23a808bSPaul Mackerras
2056e23a808bSPaul Mackerras if (p->chars_left) {
2057e23a808bSPaul Mackerras n = p->chars_left;
2058e23a808bSPaul Mackerras if (n > len)
2059e23a808bSPaul Mackerras n = len;
2060e23a808bSPaul Mackerras r = copy_to_user(buf, p->buf + p->buf_index, n);
2061e23a808bSPaul Mackerras n -= r;
2062e23a808bSPaul Mackerras p->chars_left -= n;
2063e23a808bSPaul Mackerras p->buf_index += n;
2064e23a808bSPaul Mackerras buf += n;
2065e23a808bSPaul Mackerras len -= n;
2066e23a808bSPaul Mackerras ret = n;
2067e23a808bSPaul Mackerras if (r) {
2068e23a808bSPaul Mackerras if (!n)
2069e23a808bSPaul Mackerras ret = -EFAULT;
2070e23a808bSPaul Mackerras goto out;
2071e23a808bSPaul Mackerras }
2072e23a808bSPaul Mackerras }
2073e23a808bSPaul Mackerras
2074e23a808bSPaul Mackerras i = p->hpt_index;
20753f9d4f5aSDavid Gibson hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
20763d089f84SDavid Gibson for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
20773d089f84SDavid Gibson ++i, hptp += 2) {
2078e23a808bSPaul Mackerras if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
2079e23a808bSPaul Mackerras continue;
2080e23a808bSPaul Mackerras
2081e23a808bSPaul Mackerras /* lock the HPTE so it's stable and read it */
2082e23a808bSPaul Mackerras preempt_disable();
2083e23a808bSPaul Mackerras while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
2084e23a808bSPaul Mackerras cpu_relax();
2085e23a808bSPaul Mackerras v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
2086e23a808bSPaul Mackerras hr = be64_to_cpu(hptp[1]);
20873f9d4f5aSDavid Gibson gr = kvm->arch.hpt.rev[i].guest_rpte;
2088e23a808bSPaul Mackerras unlock_hpte(hptp, v);
2089e23a808bSPaul Mackerras preempt_enable();
2090e23a808bSPaul Mackerras
2091e23a808bSPaul Mackerras if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
2092e23a808bSPaul Mackerras continue;
2093e23a808bSPaul Mackerras
2094e23a808bSPaul Mackerras n = scnprintf(p->buf, sizeof(p->buf),
2095e23a808bSPaul Mackerras "%6lx %.16lx %.16lx %.16lx\n",
2096e23a808bSPaul Mackerras i, v, hr, gr);
2097e23a808bSPaul Mackerras p->chars_left = n;
2098e23a808bSPaul Mackerras if (n > len)
2099e23a808bSPaul Mackerras n = len;
2100e23a808bSPaul Mackerras r = copy_to_user(buf, p->buf, n);
2101e23a808bSPaul Mackerras n -= r;
2102e23a808bSPaul Mackerras p->chars_left -= n;
2103e23a808bSPaul Mackerras p->buf_index = n;
2104e23a808bSPaul Mackerras buf += n;
2105e23a808bSPaul Mackerras len -= n;
2106e23a808bSPaul Mackerras ret += n;
2107e23a808bSPaul Mackerras if (r) {
2108e23a808bSPaul Mackerras if (!ret)
2109e23a808bSPaul Mackerras ret = -EFAULT;
2110e23a808bSPaul Mackerras goto out;
2111e23a808bSPaul Mackerras }
2112e23a808bSPaul Mackerras }
2113e23a808bSPaul Mackerras p->hpt_index = i;
2114e23a808bSPaul Mackerras
2115e23a808bSPaul Mackerras out:
2116e23a808bSPaul Mackerras mutex_unlock(&p->mutex);
2117e23a808bSPaul Mackerras return ret;
2118e23a808bSPaul Mackerras }
2119e23a808bSPaul Mackerras
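/*
 * Each line produced above is "index v r guest_rpte" in hex; e.g.
 * (made-up values for illustration):
 *
 *	   2c0 8000000000000c01 00000000deadb190 00000000deadb190
 */
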
2120025c9511SDaniel Axtens static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
2121e23a808bSPaul Mackerras size_t len, loff_t *ppos)
2122e23a808bSPaul Mackerras {
2123e23a808bSPaul Mackerras return -EACCES;
2124e23a808bSPaul Mackerras }
2125e23a808bSPaul Mackerras
2126e23a808bSPaul Mackerras static const struct file_operations debugfs_htab_fops = {
2127e23a808bSPaul Mackerras .owner = THIS_MODULE,
2128e23a808bSPaul Mackerras .open = debugfs_htab_open,
2129e23a808bSPaul Mackerras .release = debugfs_htab_release,
2130e23a808bSPaul Mackerras .read = debugfs_htab_read,
2131e23a808bSPaul Mackerras .write = debugfs_htab_write,
2132e23a808bSPaul Mackerras .llseek = generic_file_llseek,
2133e23a808bSPaul Mackerras };
2134e23a808bSPaul Mackerras
2135e23a808bSPaul Mackerras void kvmppc_mmu_debugfs_init(struct kvm *kvm)
2136e23a808bSPaul Mackerras {
2137faf01aefSAlexey Kardashevskiy debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm,
2138e23a808bSPaul Mackerras &debugfs_htab_fops);
2139e23a808bSPaul Mackerras }
2140e23a808bSPaul Mackerras
2141de56a948SPaul Mackerras void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
2142de56a948SPaul Mackerras {
2143de56a948SPaul Mackerras struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
2144de56a948SPaul Mackerras
2145c17b98cfSPaul Mackerras vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
2146de56a948SPaul Mackerras
2147de56a948SPaul Mackerras mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
2148de56a948SPaul Mackerras
2149de56a948SPaul Mackerras vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
2150de56a948SPaul Mackerras }
2151