/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	u32 pid;
	int ret, level, ps;
	__be64 prte, rpte;
	unsigned long ptbl;
	unsigned long root, pte, index;
	unsigned long rts, bits, offset;
	unsigned long gpa;
	unsigned long proc_tbl_size;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}
	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
	if (pid * 16 >= proc_tbl_size)
		return -EINVAL;

	/* Read partition table to find root of tree for effective PID */
	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
	if (ret)
		return ret;

	root = be64_to_cpu(prte);
	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	root = root & RPDB_MASK;

	/* P9 DD1 interprets RTS (radix tree size) differently */
	offset = rts + 31;
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		offset -= 3;

	/* current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;

	for (level = 3; level >= 0; --level) {
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* check that low bits of page table base are zero */
		if (root & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		ret = kvm_read_guest(kvm, root + index * 8,
				     &rpte, sizeof(rpte));
		if (ret)
			return ret;
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		if (pte & _PAGE_PTE)
			break;
		bits = pte & 0x1f;
		root = pte & 0x0fffffffffffff00ul;
	}
	/* need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;

	/* offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa += eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_size = ps;

	gpte->eaddr = eaddr;
	gpte->raddr = gpa;

	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
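
/*
 * Worked example of the walk above (the values follow directly from
 * p9_supported_radix_bits and the 52-bit check): the walk starts with
 * offset = 52 and consumes 13, 9 and 9 bits at levels 3, 2 and 1, then
 * 5 or 9 bits at level 0.  With 5 bits at the lowest level the offsets
 * go 52 -> 39 -> 30 -> 21 -> 16, so a leaf found at level 0 maps a
 * 2^16 = 64k page; with 9 bits the final offset is 12, i.e. a 4k page.
 */
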
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE	MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE	MMU_PAGE_4K
#endif

static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift)
{
	int psize = MMU_BASE_PSIZE;

	if (pshift >= PUD_SHIFT)
		psize = MMU_PAGE_1G;
	else if (pshift >= PMD_SHIFT)
		psize = MMU_PAGE_2M;
	addr &= ~0xfffUL;
	addr |= mmu_psize_defs[psize].ap << 5;
	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
		     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
		asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
			     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
	asm volatile("ptesync": : :"memory");
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned long addr)
{
	unsigned long rb = 0x2 << PPC_BITLSHIFT(53);	/* IS = 2 */

	asm volatile("ptesync": : :"memory");
	/* RIC=1 PRS=0 R=1 IS=2 */
	asm volatile(PPC_TLBIE_5(%0, %1, 1, 0, 1)
		     : : "r" (rb), "r" (kvm->arch.lpid) : "memory");
	asm volatile("ptesync": : :"memory");
}

unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				      unsigned long clr, unsigned long set,
				      unsigned long addr, unsigned int shift)
{
	unsigned long old = 0;

	if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
	    pte_present(*ptep)) {
		/* have to invalidate it first */
		old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
		kvmppc_radix_tlbie_page(kvm, addr, shift);
		set |= _PAGE_PRESENT;
		old &= _PAGE_PRESENT;
	}
	return __radix_pte_update(ptep, clr, set) | old;
}
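
/*
 * Illustrative use of the helpers above (a sketch of the pattern the
 * rest of this file follows, not a new code path):
 *
 *	old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0, gpa, shift);
 *	kvmppc_radix_tlbie_page(kvm, gpa, shift);
 *
 * kvmppc_radix_update_pte() returns the previous PTE value, so callers
 * can test _PAGE_DIRTY afterwards; on POWER9 DD1 it transparently
 * invalidates and flushes first, since a valid PTE has to be
 * invalidated before its bits can be changed on that chip.
 */
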
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
			     unsigned int level, unsigned long mmu_seq)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	unsigned long old;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = kvm->arch.pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_huge(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = pmd_alloc_one(kvm->mm, gpa);

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();

	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_huge(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (level <= 1 && !new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Check if we raced and someone else has set the same thing */
		if (level == 2 && pud_raw(*pud) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		old = kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      ~0UL, 0, hgpa, PUD_SHIFT);
		kvmppc_radix_tlbie_page(kvm, hgpa, PUD_SHIFT);
		if (old & _PAGE_DIRTY) {
			unsigned long gfn = hgpa >> PAGE_SHIFT;
			struct kvm_memory_slot *memslot;
			memslot = gfn_to_memslot(kvm, gfn);
			if (memslot && memslot->dirty_bitmap)
				kvmppc_update_dirty_map(memslot,
							gfn, PUD_SIZE);
		}
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.  new_pmd will be NULL since level == 2.
			 */
			new_pmd = pmd_offset(pud, 0);
			pud_clear(pud);
			kvmppc_radix_flush_pwc(kvm, gpa);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (level == 0 && !new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Check if we raced and someone else has set the same thing */
		if (level == 1 && pmd_raw(*pmd) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					      ~0UL, 0, lgpa, PMD_SHIFT);
		kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
		if (old & _PAGE_DIRTY) {
			unsigned long gfn = lgpa >> PAGE_SHIFT;
			struct kvm_memory_slot *memslot;
			memslot = gfn_to_memslot(kvm, gfn);
			if (memslot && memslot->dirty_bitmap)
				kvmppc_update_dirty_map(memslot,
							gfn, PMD_SIZE);
		}
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.  new_ptep will be NULL since level == 1.
			 */
			new_ptep = pte_offset_kernel(pmd, 0);
			pmd_clear(pmd);
			kvmppc_radix_flush_pwc(kvm, gpa);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* PTE was previously valid, so invalidate it */
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
					      0, gpa, 0);
		kvmppc_radix_tlbie_page(kvm, gpa, 0);
		if (old & _PAGE_DIRTY)
			mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		pmd_free(kvm->mm, new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
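
/*
 * Outline of the mmu_seq protocol used above: the caller (see
 * kvmppc_book3s_radix_page_fault() below) samples the notifier sequence
 * before resolving the fault, and kvmppc_create_pte() re-checks it under
 * kvm->mmu_lock via mmu_notifier_retry().  If an invalidation ran in
 * between, no PTE is installed and -EAGAIN makes the guest retry:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	... translate the fault, possibly sleeping ...
 *	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 */
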
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq, pte_size;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	struct page *page = NULL;
	long ret;
	bool writing;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned long pgflags;
	unsigned int shift, level;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}

	/* Translate the logical address and get the page */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);
	}

	writing = (dsisr & DSISR_ISSTORE) != 0;
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		upgrade_p = NULL;
	}

	if (dsisr & DSISR_SET_RC) {
		/*
		 * Need to set an R or C bit in the 2nd-level tables;
		 * since we are just helping out the hardware here,
		 * it is sufficient to do what the hardware does.
		 */
		pgflags = _PAGE_ACCESSED;
		if (writing)
			pgflags |= _PAGE_DIRTY;
		/*
		 * We are walking the secondary page table here. We can do this
		 * without disabling irq.
		 */
		spin_lock(&kvm->mmu_lock);
		ptep = __find_linux_pte(kvm->arch.pgtable,
					gpa, NULL, &shift);
		if (ptep && pte_present(*ptep) &&
		    (!writing || pte_write(*ptep))) {
			kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
						gpa, shift);
			dsisr &= ~DSISR_SET_RC;
		}
		spin_unlock(&kvm->mmu_lock);
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		pfn = page_to_pfn(page);
		upgrade_write = true;
	} else {
		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}

	/* See if we can insert a 1GB or 2MB large PTE here */
	level = 0;
	if (page && PageCompound(page)) {
		pte_size = PAGE_SIZE << compound_order(compound_head(page));
		if (pte_size >= PUD_SIZE &&
		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
		    (hva & (PUD_SIZE - PAGE_SIZE))) {
			level = 2;
			pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
		} else if (pte_size >= PMD_SIZE &&
			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
			   (hva & (PMD_SIZE - PAGE_SIZE))) {
			level = 1;
			pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
		}
	}

	/*
	 * Compute the PTE value that we need to insert.
	 */
	if (page) {
		pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
			_PAGE_ACCESSED;
		if (writing || upgrade_write)
			pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
		pte = pfn_pte(pfn, __pgprot(pgflags));
	} else {
		/*
		 * Read the PTE from the process' radix tree and use that
		 * so we get the attribute bits.
		 */
		local_irq_disable();
		ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
		pte = *ptep;
		local_irq_enable();
		if (shift == PUD_SHIFT &&
		    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
		    (hva & (PUD_SIZE - PAGE_SIZE))) {
			level = 2;
		} else if (shift == PMD_SHIFT &&
			   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
			   (hva & (PMD_SIZE - PAGE_SIZE))) {
			level = 1;
		} else if (shift && shift != PAGE_SHIFT) {
			/* Adjust PFN */
			unsigned long mask = (1ul << shift) - PAGE_SIZE;
			pte = __pte(pte_val(pte) | (hva & mask));
		}
		if (!(writing || upgrade_write))
			pte = __pte(pte_val(pte) & ~_PAGE_WRITE);
		pte = __pte(pte_val(pte) | _PAGE_EXEC);
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
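
/*
 * A worked example of the alignment test used above (hypothetical
 * addresses): a 2MB leaf is only possible when gpa and hva agree in all
 * offset bits below PMD_SIZE, i.e.
 *
 *	(gpa & (PMD_SIZE - PAGE_SIZE)) == (hva & (PMD_SIZE - PAGE_SIZE))
 *
 * With 4k base pages, gpa 0x40200000 backed at hva 0x7fff40200000 has
 * zero in bits 12-20 on both sides, so level = 1 works; if the backing
 * were at hva 0x7fff40201000 the mismatch in bit 12 would force
 * level = 0 and a normal-sized PTE.
 */
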
/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	unsigned long old;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep)) {
		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
					      gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
			unsigned long npages = 1;
			if (shift)
				npages = 1ul << (shift - PAGE_SHIFT);
			kvmppc_update_dirty_map(memslot, gfn, npages);
		}
	}
	return 0;
}
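
/*
 * Example of the npages computation above (assuming 4k base pages): if
 * the unmapped PTE was a 2MB leaf, shift is PMD_SHIFT (21) and
 * 1ul << (21 - 12) = 512 pages are recorded in the dirty map; when
 * shift is 0 the PTE covered a single base page.
 */
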
/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					gpa, shift);
		/* XXX need to flush tlb here? */
		ref = 1;
	}
	return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift);
	}
	return ret;
}
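
/*
 * Example of the return-value contract above (again assuming 4k base
 * pages): a dirty 2MB leaf makes kvm_radix_test_clear_dirty() return
 * 1 << (PMD_SHIFT - PAGE_SHIFT) = 512, and the caller below sets that
 * many consecutive bits in the dirty bitmap before skipping ahead by
 * the same amount.
 */
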
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}

static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

	return 0;
}
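
/*
 * Sketch of one resulting encoding (illustrative): for 64k pages,
 * mmu_psize_defs[MMU_PAGE_64K].shift is 16, so the entry reads
 * 16 | (ap << 29), i.e. the page shift in the low bits and the
 * actual-page-size field in bits 31:29 for userspace to pick apart.
 */
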
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}

void kvmppc_free_radix(struct kvm *kvm)
{
	unsigned long ig, iu, im;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	if (!kvm->arch.pgtable)
		return;
	pgd = kvm->arch.pgtable;
	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
			if (!pud_present(*pud))
				continue;
			if (pud_huge(*pud)) {
				pud_clear(pud);
				continue;
			}
			pmd = pmd_offset(pud, 0);
			for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
				if (pmd_is_leaf(*pmd)) {
					pmd_clear(pmd);
					continue;
				}
				if (!pmd_present(*pmd))
					continue;
				pte = pte_offset_map(pmd, 0);
				memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
				kvmppc_pte_free(pte);
				pmd_clear(pmd);
			}
			pmd_free(kvm->mm, pmd_offset(pud, 0));
			pud_clear(pud);
		}
		pud_free(kvm->mm, pud_offset(pgd, 0));
		pgd_clear(pgd);
	}
	pgd_free(kvm->mm, kvm->arch.pgtable);
	kvm->arch.pgtable = NULL;
}

static void pte_ctor(void *addr)
{
	memset(addr, 0, PTE_TABLE_SIZE);
}

int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;

	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;
	return 0;
}

void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
}
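
/*
 * Minimal usage sketch for the two module-level helpers above
 * (hypothetical caller; the real call sites live in the HV KVM
 * init/exit paths elsewhere):
 *
 *	ret = kvmppc_radix_init();
 *	if (ret)
 *		return ret;
 *	...
 *	kvmppc_radix_exit();
 */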