/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/pte-walk.h>

#include "trace_hv.h"

//#define DEBUG_RESIZE_HPT	1

#ifdef DEBUG_RESIZE_HPT
#define resize_hpt_debug(resize, ...)				\
	do {							\
		printk(KERN_DEBUG "RESIZE HPT %p: ", resize);	\
		printk(__VA_ARGS__);				\
	} while (0)
#else
#define resize_hpt_debug(resize, ...)				\
	do { } while (0)
#endif

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);

struct kvm_resize_hpt {
	/* These fields read-only after init */
	struct kvm *kvm;
	struct work_struct work;
	u32 order;

	/* These fields protected by kvm->lock */
	int error;
	bool prepare_done;

	/* Private to the work thread, until prepare_done is true,
	 * then protected by kvm->resize_hpt_sem */
	struct kvm_hpt_info hpt;
};

static void kvmppc_rmap_reset(struct kvm *kvm);

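/*
 * Allocate a hashed page table of 2^order bytes, preferably from the
 * CMA reserve and otherwise from the page allocator, together with a
 * reverse-map (revmap) array holding one entry per HPTE.  Returns 0 on
 * success or a negative errno.
 */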
int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
{
	unsigned long hpt = 0;
	int cma = 0;
	struct page *page = NULL;
	struct revmap_entry *rev;
	unsigned long npte;

	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
		return -EINVAL;

	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		memset((void *)hpt, 0, (1ul << order));
		cma = 1;
	}

	if (!hpt)
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL
				       |__GFP_NOWARN, order - PAGE_SHIFT);

	if (!hpt)
		return -ENOMEM;

	/* HPTEs are 2**4 bytes long */
	npte = 1ul << (order - 4);

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * npte);
	if (!rev) {
		pr_err("kvmppc_allocate_hpt: Couldn't alloc reverse map array\n");
		if (cma)
			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
		else
			free_pages(hpt, order - PAGE_SHIFT);
		return -ENOMEM;
	}

	info->order = order;
	info->virt = hpt;
	info->cma = cma;
	info->rev = rev;

	return 0;
}

void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
{
	atomic64_set(&kvm->arch.mmio_update, 0);
	kvm->arch.hpt = *info;
	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);

	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
		 info->virt, (long)info->order, kvm->arch.lpid);
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
{
	long err = -EBUSY;
	struct kvm_hpt_info info;

	if (kvm_is_radix(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->arch.hpte_setup_done) {
		kvm->arch.hpte_setup_done = 0;
		/* order hpte_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.hpte_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt.order == order) {
		/* We already have a suitable HPT */

		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		err = 0;
		goto out;
	}

	if (kvm->arch.hpt.virt) {
		kvmppc_free_hpt(&kvm->arch.hpt);
		kvmppc_rmap_reset(kvm);
	}

	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		goto out;
	kvmppc_set_hpt(kvm, &info);

out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm_hpt_info *info)
{
	vfree(info->rev);
	if (info->cma)
		kvm_free_hpt_cma(virt_to_page(info->virt),
				 1 << (info->order - PAGE_SHIFT));
	else if (info->virt)
		free_pages(info->virt, info->order - PAGE_SHIFT);
	info->virt = 0;
	info->order = 0;
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

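/*
 * Create bolted HPTEs mapping the VRMA (the area the guest sees as real
 * memory when address translation is off).  At most one HPTE per HPTEG
 * is created, in slot 7 of each group, covering up to 1TB.
 */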
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
		npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25)))
			& kvmppc_hpt_mask(&kvm->arch.hpt);
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
	host_lpid = mfspr(SPRN_LPID);
	rsvd_lpid = LPID_RSVD;

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);
}

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	long ret;

	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

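/*
 * Translate a guest effective address to a guest real address on
 * behalf of the host (e.g. for instruction emulation): look up the
 * guest SLB (or use the VRMA SLB value in real mode), find the
 * matching HPTE, and derive the access permissions from the PP bits,
 * storage key and AMR.
 */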
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, orig_v, gr;
	__be64 *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
	gr = kvm->arch.hpt.rev[index].guest_rpte;

	unlock_hpte(hptep, orig_v);
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

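/*
 * As examples of the test above: stw (major opcode 36, 0x90000000)
 * has the 0x10000000 bit set while lwz (opcode 32, 0x80000000) does
 * not; for the major-opcode-31 X-forms, instruction bit 0x100
 * distinguishes stores such as stwx (XO 151) from loads such as
 * lwzx (XO 23).
 */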
int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned long gpa, gva_t ea, int is_store)
{
	u32 last_inst;

	/*
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
		EMULATE_DONE)
		return RESUME_GUEST;

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(last_inst) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long hpte[3], r;
	unsigned long hnow_v, hnow_r;
	__be64 *hptep;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	bool is_ci;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;
	long mmio_update;

	if (kvm_is_radix(kvm))
		return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;

	if (vcpu->arch.pgfault_cache) {
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) {
			r = vcpu->arch.pgfault_cache->rpte;
			psize = hpte_page_size(vcpu->arch.pgfault_hpte[0], r);
			gpa_base = r & HPTE_R_RPN & ~(psize - 1);
			gfn_base = gpa_base >> PAGE_SHIFT;
			gpa = gpa_base | (ea & (psize - 1));
			return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
						dsisr & DSISR_ISSTORE);
		}
	}
	index = vcpu->arch.pgfault_index;
	hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
	rev = &kvm->arch.hpt.rev[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
	hpte[1] = be64_to_cpu(hptep[1]);
	hpte[2] = r = rev->guest_rpte;
	unlock_hpte(hptep, hpte[0]);
	preempt_enable();

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]);
		hpte[1] = hpte_new_to_old_r(hpte[1]);
	}
	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	ret = -EFAULT;
	is_ci = false;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			goto out_put;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			pte_t *ptep, pte;
			unsigned long flags;
			/*
			 * We need to protect against page table destruction
			 * hugepage split and collapse.
			 */
			local_irq_save(flags);
			ptep = find_current_mm_pte(current->mm->pgd,
						   hva, NULL, NULL);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1);
				if (__pte_write(pte))
					write_ok = 1;
			}
			local_irq_restore(flags);
		}
	}

	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_ci)) {
		if (is_ci)
			goto out_put;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to pfn.
	 * Since the pfn is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
	r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
					((pfn << PAGE_SHIFT) & ~(psize - 1));
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hnow_v = be64_to_cpu(hptep[0]);
	hnow_r = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
		hnow_r = hpte_new_to_old_r(hnow_r);
	}
	if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		r = hpte_old_to_new_r(hpte[0], r);
		hpte[0] = hpte_old_to_new_v(hpte[0]);
	}
	hptep[1] = cpu_to_be64(r);
	eieio();
	__unlock_hpte(hptep, hpte[0]);
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

out_put:
	trace_kvm_page_fault_exit(vcpu, hpte, ret);

	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

out_unlock:
	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	preempt_enable();
	goto out_put;
}

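/*
 * Clear the rmap (reverse-mapping) arrays of every memslot.  Used when
 * the HPT is cleared or replaced, since the chains would otherwise
 * refer to HPTE slots that no longer contain valid entries.
 */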
static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn);

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				hva_handler_fn handler)
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			ret = handler(kvm, memslot, gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  hva_handler_fn handler)
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
			      unsigned long *rmapp, unsigned long gfn)
{
	__be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long j, h;
	unsigned long ptel, psize, rcbits;

	j = rev[i].forw;
	if (j == i) {
		/* chain is now empty */
		*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
	} else {
		/* remove i from chain */
		h = rev[i].back;
		rev[h].forw = j;
		rev[j].back = h;
		rev[i].forw = rev[i].back = i;
		*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
	}

	/* Now check and modify the HPTE */
	ptel = rev[i].guest_rpte;
	psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
	if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
	    hpte_rpn(ptel, psize) == gfn) {
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
		/* Harvest R and C */
		rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
		*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
		if (rcbits & HPTE_R_C)
			kvmppc_update_rmap_change(rmapp, psize);
		if (rcbits & ~rev[i].guest_rpte) {
			rev[i].guest_rpte = ptel | rcbits;
			note_hpte_modification(kvm, &rev[i]);
		}
	}
}

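/*
 * MMU notifier unmap handler for one guest page under the HPT MMU:
 * repeatedly take the HPTE at the head of the page's rmap chain and
 * tear it down via kvmppc_unmap_hpte() until the chain is empty.
 */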
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			   unsigned long gfn)
{
	unsigned long i;
	__be64 *hptep;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}

		kvmppc_unmap_hpte(kvm, i, rmapp, gfn);
		unlock_rmap(rmapp);
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	}
	return 0;
}

int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva_range(kvm, start, end, handler);
	return 0;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long gfn;
	unsigned long n;
	unsigned long *rmapp;

	gfn = memslot->base_gfn;
	rmapp = memslot->arch.rmap;
	for (n = memslot->npages; n; --n, ++gfn) {
		if (kvm_is_radix(kvm)) {
			kvm_unmap_radix(kvm, memslot, gfn);
			continue;
		}
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, memslot, gfn);
		++rmapp;
	}
}

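/*
 * MMU notifier aging handler: test and clear the referenced (R) bit
 * for one guest page, harvesting R from each HPTE in the page's rmap
 * chain into the guest view.  Returns 1 if the page was referenced.
 */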
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	__be64 *hptep;
	int ret = 0;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(be64_to_cpu(hptep[1]) & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
		    (be64_to_cpu(hptep[1]) & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
	return kvm_handle_hva_range(kvm, start, end, handler);
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;
	unsigned long *rmapp;

	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
			j = rev[i].forw;
			if (be64_to_cpu(hp[1]) & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
	return kvm_handle_hva(kvm, hva, handler);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	hva_handler_fn handler;

	handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
	kvm_handle_hva(kvm, hva, handler);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.hpt.rev;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	__be64 *hptep;
	int npages_dirty = 0;

retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
			>> KVMPPC_RMAP_CHG_SHIFT;
		*rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
		npages_dirty = 1;
		if (change_order > PAGE_SHIFT)
			npages_dirty = 1ul << (change_order - PAGE_SHIFT);
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		unsigned long hptep1;
		hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		hptep1 = be64_to_cpu(hptep[1]);
		if (!(hptep1 & HPTE_R_C) &&
		    (!hpte_is_writable(hptep1) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK))
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) {
			__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = be64_to_cpu(hptep[0]);
		r = be64_to_cpu(hptep[1]);
		if (r & HPTE_R_C) {
			hptep[1] = cpu_to_be64(r & ~HPTE_R_C);
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = hpte_page_size(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~HPTE_V_ABSENT;
		v |= HPTE_V_VALID;
		__unlock_hpte(hptep, v);
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}

void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

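/*
 * Build the dirty log for one memslot under the HPT MMU: scan each
 * page's rmap chain, harvesting and clearing the changed (C) bits,
 * and set the corresponding bits in the supplied bitmap.
 */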
long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	unsigned long *rmapp;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages && map)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
		++rmapp;
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, 1, pages);
	if (npages < 1)
		goto err;
	page = pages[0];
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		if (!kvm_is_radix(kvm)) {
			rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
			lock_rmap(rmap);
			*rmap |= KVMPPC_RMAP_CHANGED;
			unlock_rmap(rmap);
		} else if (memslot->dirty_bitmap) {
			mark_page_dirty(kvm, gfn);
		}
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/*
 * HPT resizing
 */
static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
{
	int rc;

	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
	if (rc < 0)
		return rc;

	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
			 resize->hpt.virt);

	return 0;
}

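/*
 * Move one HPTE from the current HPT into the pending resized HPT:
 * unmap it from its rmap chain, recompute the hash from the AVPN and
 * the old slot number, and install it at the corresponding slot in
 * the new table.  When shrinking, collisions are resolved in favour
 * of bolted entries.
 */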
static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
					    unsigned long idx)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info *old = &kvm->arch.hpt;
	struct kvm_hpt_info *new = &resize->hpt;
	unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1;
	unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1;
	__be64 *hptep, *new_hptep;
	unsigned long vpte, rpte, guest_rpte;
	int ret;
	struct revmap_entry *rev;
	unsigned long apsize, psize, avpn, pteg, hash;
	unsigned long new_idx, new_pteg, replace_vpte;

	hptep = (__be64 *)(old->virt + (idx << 4));

	/* Guest is stopped, so new HPTEs can't be added or faulted
	 * in, only unmapped or altered by host actions.  So, it's
	 * safe to check this before we take the HPTE lock */
	vpte = be64_to_cpu(hptep[0]);
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		return 0;	/* nothing to do */

	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();

	vpte = be64_to_cpu(hptep[0]);

	ret = 0;
	if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT))
		/* Nothing to do */
		goto out;

	/* Unmap */
	rev = &old->rev[idx];
	guest_rpte = rev->guest_rpte;

	ret = -EIO;
	apsize = hpte_page_size(vpte, guest_rpte);
	if (!apsize)
		goto out;

	if (vpte & HPTE_V_VALID) {
		unsigned long gfn = hpte_rpn(guest_rpte, apsize);
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		struct kvm_memory_slot *memslot =
			__gfn_to_memslot(kvm_memslots(kvm), gfn);

		if (memslot) {
			unsigned long *rmapp;
			rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];

			lock_rmap(rmapp);
			kvmppc_unmap_hpte(kvm, idx, rmapp, gfn);
			unlock_rmap(rmapp);
		}

		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}

	/* Reload PTE after unmap */
	vpte = be64_to_cpu(hptep[0]);

	BUG_ON(vpte & HPTE_V_VALID);
	BUG_ON(!(vpte & HPTE_V_ABSENT));

	ret = 0;
	if (!(vpte & HPTE_V_BOLTED))
		goto out;

	rpte = be64_to_cpu(hptep[1]);
	psize = hpte_base_page_size(vpte, rpte);
	avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
	pteg = idx / HPTES_PER_GROUP;
	if (vpte & HPTE_V_SECONDARY)
		pteg = ~pteg;

	if (!(vpte & HPTE_V_1TB_SEG)) {
		unsigned long offset, vsid;

		/* We only have 28 - 23 bits of offset in avpn */
		offset = (avpn & 0x1f) << 23;
		vsid = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (psize < (1ULL << 23))
			offset |= ((vsid ^ pteg) & old_hash_mask) * psize;

		hash = vsid ^ (offset / psize);
	} else {
		unsigned long offset, vsid;

		/* We only have 40 - 23 bits of seg_off in avpn */
		offset = (avpn & 0x1ffff) << 23;
		vsid = avpn >> 17;
		if (psize < (1ULL << 23))
			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;

		hash = vsid ^ (vsid << 25) ^ (offset / psize);
	}

	new_pteg = hash & new_hash_mask;
	if (vpte & HPTE_V_SECONDARY) {
		BUG_ON(~pteg != (hash & old_hash_mask));
		new_pteg = ~new_pteg;
	} else {
		BUG_ON(pteg != (hash & old_hash_mask));
	}

	new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
	new_hptep = (__be64 *)(new->virt + (new_idx << 4));

	replace_vpte = be64_to_cpu(new_hptep[0]);

	if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		BUG_ON(new->order >= old->order);

		if (replace_vpte & HPTE_V_BOLTED) {
			if (vpte & HPTE_V_BOLTED)
				/* Bolted collision, nothing we can do */
				ret = -ENOSPC;
			/* Discard the new HPTE */
			goto out;
		}

		/* Discard the previous HPTE */
	}

	new_hptep[1] = cpu_to_be64(rpte);
	new->rev[new_idx].guest_rpte = guest_rpte;
	/* No need for a barrier, since new HPT isn't active */
	new_hptep[0] = cpu_to_be64(vpte);
	unlock_hpte(new_hptep, vpte);

out:
	unlock_hpte(hptep, vpte);
	return ret;
}

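/*
 * Rehash every entry of the current HPT into the pending resized HPT.
 */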
static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	unsigned long i;
	int rc;

	/*
	 * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
	 * that POWER9 uses, and could well hit a BUG_ON on POWER9.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return -EIO;
	for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
		rc = resize_hpt_rehash_hpte(resize, i);
		if (rc != 0)
			return rc;
	}

	return 0;
}

static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
{
	struct kvm *kvm = resize->kvm;
	struct kvm_hpt_info hpt_tmp;

	/* Exchange the pending tables in the resize structure with
	 * the active tables */

	resize_hpt_debug(resize, "resize_hpt_pivot()\n");

	spin_lock(&kvm->mmu_lock);
	asm volatile("ptesync" : : : "memory");

	hpt_tmp = kvm->arch.hpt;
	kvmppc_set_hpt(kvm, &resize->hpt);
	resize->hpt = hpt_tmp;

	spin_unlock(&kvm->mmu_lock);

	synchronize_srcu_expedited(&kvm->srcu);

	resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
}

static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
	BUG_ON(kvm->arch.resize_hpt != resize);

	if (!resize)
		return;

	if (resize->hpt.virt)
		kvmppc_free_hpt(&resize->hpt);

	kvm->arch.resize_hpt = NULL;
	kfree(resize);
}

static void resize_hpt_prepare_work(struct work_struct *work)
{
	struct kvm_resize_hpt *resize = container_of(work,
						     struct kvm_resize_hpt,
						     work);
	struct kvm *kvm = resize->kvm;
	int err;

	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
			 resize->order);

	err = resize_hpt_allocate(resize);

	mutex_lock(&kvm->lock);

	resize->error = err;
	resize->prepare_done = true;

	mutex_unlock(&kvm->lock);
}

long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
				     struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	int ret;

	if (flags != 0)
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	resize = kvm->arch.resize_hpt;

	if (resize) {
		if (resize->order == shift) {
			/* Suitable resize in progress */
			if (resize->prepare_done) {
				ret = resize->error;
				if (ret != 0)
					resize_hpt_release(kvm, resize);
			} else {
				ret = 100; /* estimated time in ms */
			}

			goto out;
		}

		/* not suitable, cancel it */
		resize_hpt_release(kvm, resize);
	}

	ret = 0;
	if (!shift)
		goto out; /* nothing to do */

	/* start new resize */

	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
	if (!resize) {
		ret = -ENOMEM;
		goto out;
	}
	resize->order = shift;
	resize->kvm = kvm;
	INIT_WORK(&resize->work, resize_hpt_prepare_work);
	kvm->arch.resize_hpt = resize;

	schedule_work(&resize->work);

	ret = 100; /* estimated time in ms */

out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void resize_hpt_boot_vcpu(void *opaque)
{
	/* Nothing to do, just force a KVM exit */
}

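/*
 * Commit half of the two-phase HPT resize: once preparation has
 * completed, stop the vcpus (clear hpte_setup_done and kick every CPU
 * out of the guest), rehash all entries into the new HPT, pivot it
 * in, then let the vcpus run again.
 */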
long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
				    struct kvm_ppc_resize_hpt *rhpt)
{
	unsigned long flags = rhpt->flags;
	unsigned long shift = rhpt->shift;
	struct kvm_resize_hpt *resize;
	long ret;

	if (flags != 0)
		return -EINVAL;

	if (shift && ((shift < 18) || (shift > 46)))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	resize = kvm->arch.resize_hpt;

	/* This shouldn't be possible */
	ret = -EIO;
	if (WARN_ON(!kvm->arch.hpte_setup_done))
		goto out_no_hpt;

	/* Stop VCPUs from running while we mess with the HPT */
	kvm->arch.hpte_setup_done = 0;
	smp_mb();

	/* Boot all CPUs out of the guest so they re-read
	 * hpte_setup_done */
	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);

	ret = -ENXIO;
	if (!resize || (resize->order != shift))
		goto out;

	ret = -EBUSY;
	if (!resize->prepare_done)
		goto out;

	ret = resize->error;
	if (ret != 0)
		goto out;

	ret = resize_hpt_rehash(resize);
	if (ret != 0)
		goto out;

	resize_hpt_pivot(resize);

out:
	/* Let VCPUs run again */
	kvm->arch.hpte_setup_done = 1;
	smp_mb();
out_no_hpt:
	resize_hpt_release(kvm, resize);
	mutex_unlock(&kvm->lock);
	return ret;
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

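/*
 * As implemented below, the stream is a sequence of
 * struct kvm_get_htab_header records, each followed by hdr.n_valid
 * HPTEs of 16 bytes (two be64 words); a further hdr.n_invalid
 * entries are described by the header but not written out:
 *
 *	+--------+-----------------+--------+-----------------+----
 *	| header | n_valid * 16 B  | header | n_valid * 16 B  | ...
 *	+--------+-----------------+--------+-----------------+----
 */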
struct kvm_htab_ctx {
	unsigned long	index;
	unsigned long	flags;
	struct kvm	*kvm;
	int		first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))

/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) &&
	    (be64_to_cpu(hptp[1]) & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, __be64 *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r, hr;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]);
		hr = be64_to_cpu(hptp[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, hr);
			hr = hpte_new_to_old_r(hr);
		}

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hr)) {
			revp->guest_rpte |= (hr &
				(HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		unlock_hpte(hptp, be64_to_cpu(hptp[0]));
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = cpu_to_be64(v);
	hpte[1] = cpu_to_be64(r);
	return ok;
}

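/*
 * Handle reads on the HTAB fd: skip clean entries on later passes,
 * then emit a header followed by a run of valid entries, and count
 * the following run of invalid entries in the header's n_invalid
 * field.
 */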
static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	__be64 *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	revp = kvm->arch.hpt.rev + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvmppc_hpt_npte(&kvm->arch.hpt) &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

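/*
 * Handle writes on the HTAB fd: for each header, install the
 * following n_valid entries through the H_ENTER path and invalidate
 * the next n_invalid entries.  If a VRMA HPTE is seen before HPT
 * setup is complete, the VRMA SLB value and the LPCR VRMASD field
 * are derived from it.
 */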
static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	__be64 *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int hpte_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	hpte_setup = kvm->arch.hpte_setup_done;
	if (hpte_setup) {
		kvm->arch.hpte_setup_done = 0;	/* temporarily */
		/* order hpte_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.hpte_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) ||
		    i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt))
			break;

		hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			__be64 hpte_v;
			__be64 hpte_r;

			err = -EFAULT;
			if (__get_user(hpte_v, lbuf) ||
			    __get_user(hpte_r, lbuf + 1))
				goto out;
			v = be64_to_cpu(hpte_v);
			r = be64_to_cpu(hpte_r);
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!hpte_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_base_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = senc << (LPCR_VRMASD_SH - 4);
				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
				hpte_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

out:
	/* Order HPTE updates vs. hpte_setup_done */
	smp_wmb();
	kvm->arch.hpte_setup_done = hpte_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kfree(ctx);
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

struct debugfs_htab_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	hpt_index;
	int		chars_left;
	int		buf_index;
	char		buf[64];
};

static int debugfs_htab_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_htab_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_htab_release(struct inode *inode, struct file *file)
{
	struct debugfs_htab_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}

static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct debugfs_htab_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long i, n;
	unsigned long v, hr, gr;
	struct kvm *kvm;
	__be64 *hptp;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}

	kvm = p->kvm;
	i = p->hpt_index;
	hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
	for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt);
	     ++i, hptp += 2) {
		if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
		hr = be64_to_cpu(hptp[1]);
		gr = kvm->arch.hpt.rev[i].guest_rpte;
		unlock_hpte(hptp, v);
		preempt_enable();

		if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
			continue;

		n = scnprintf(p->buf, sizeof(p->buf),
			      "%6lx %.16lx %.16lx %.16lx\n",
			      i, v, hr, gr);
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			goto out;
		}
	}
	p->hpt_index = i;

out:
	mutex_unlock(&p->mutex);
	return ret;
}

static ssize_t debugfs_htab_write(struct file *file, const char __user *buf,
				  size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_htab_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_htab_open,
	.release = debugfs_htab_release,
	.read	 = debugfs_htab_read,
	.write	 = debugfs_htab_write,
	.llseek	 = generic_file_llseek,
};

void kvmppc_mmu_debugfs_init(struct kvm *kvm)
{
	kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
						    kvm->arch.debugfs_dir, kvm,
						    &debugfs_htab_fops);
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */

	if (kvm_is_radix(vcpu->kvm))
		mmu->xlate = kvmppc_mmu_radix_xlate;
	else
		mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}