/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte(swapper_pg_dir, addr);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * If we're not using MMU notifiers, we never take pages away
	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
		global = 0;
	else if (kvm->arch.using_mmu_notifiers)
		global = 1;
	else
		global = !(flags & H_LOCAL);

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}
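
/*
 * Note on the reverse-map (rmap) word used by the two functions below:
 * each memslot keeps one unsigned long per guest page, which (per the
 * KVMPPC_RMAP_* definitions) packs a lock bit, a "present" bit, saved
 * R/C bits and the index of the first HPTE in the chain for that page.
 */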

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva,
			      int writing, unsigned long *pte_sizep)
{
	pte_t *ptep;
	unsigned long ps = *pte_sizep;
	unsigned int shift;

	ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift);
	if (!ptep)
		return __pte(0);
	if (shift)
		*pte_sizep = 1ul << shift;
	else
		*pte_sizep = PAGE_SIZE;
	if (ps > *pte_sizep)
		return __pte(0);
	if (!pte_present(*ptep))
		return __pte(0);
	return kvmppc_read_update_linux_pte(ptep, writing);
}

static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = hpte_v;
}
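
/*
 * Real-mode worker for the H_ENTER hypercall: validate the proposed HPTE,
 * find the memslot and host page backing the guest physical address (or
 * mark the entry for emulated MMIO), pick and lock a slot in the target
 * HPTE group, link the entry into the reverse-map chain, and finally
 * write the HPTE.  On success the index actually used is returned via
 * *pte_idx_ret.
 */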

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned long *physp, pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* PPC970 can't do emulated MMIO */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			return H_PARAMETER;
		physp += slot_fn;
		if (realmode)
			physp = real_vmalloc_addr(physp);
		pa = *physp;
		if (!pa)
			return H_TOO_HARD;
		is_io = pa & (HPTE_R_I | HPTE_R_W);
		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
		pa &= PAGE_MASK;
	} else {
		/* Translate to host virtual address */
		hva = __gfn_to_hva_memslot(memslot, gfn);

		/* Look up the Linux PTE for the backing page */
		pte_size = psize;
		pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);
		if (pte_present(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
		}
	}

	if (pte_size < psize)
		return H_PARAMETER;
	if (pa && pte_size > psize)
		pa |= gpa & (pte_size - 1);

	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}
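
	/*
	 * Each HPTE is two doublewords (16 bytes), so entry pte_index lives
	 * at byte offset pte_index << 4 in the hashed page table, and an
	 * HPTE group holds 8 consecutive entries.
	 */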

	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((*hpte & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~HPTE_V_HVLOCK;
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~HPTE_V_HVLOCK;
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (kvm->arch.using_mmu_notifiers &&
		    mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = ptel;

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = pteh;
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}
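
/*
 * kvm->arch.tlbie_lock serializes tlbie ... tlbsync sequences between CPUs.
 * try_lock_tlbie() is a lwarx/stwcx. test-and-set that stores this CPU's
 * paca lock token; it returns 1 if the lock was acquired and 0 if it was
 * already held, so callers spin with cpu_relax() until it succeeds.
 */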

#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;

	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	unsigned long *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = hpte[0] & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~HPTE_V_VALID;
		rb = compute_tlbie_rb(v, hpte[1], pte_index);
		if (global_invalidates(kvm, flags)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}
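
/*
 * H_BULK_REMOVE: up to four requests are passed as (control, match) pairs
 * in GPRs 4-11.  In each control word the top two bits give the request
 * type (3 terminates the list), the low two bits of the top byte select
 * the match condition (absolute, and-cond, AVPN), and the low 56 bits are
 * the PTE index.  Each control word is rewritten with a completion code
 * in the top byte (0x80 done, 0x90 not found, 0xa0 parameter error) plus
 * the returned R/C bits, and the TLB invalidations are batched.
 */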

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	unsigned long *hp, *hptes[4], tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	long int local = 0;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];

	if (atomic_read(&kvm->online_vcpus) == 1)
		local = 1;
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (unsigned long *)
				(kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp[0] & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp[0] & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~HPTE_V_HVLOCK;
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp[0] & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			hp[0] &= ~HPTE_V_VALID;		/* leave it locked */
			tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		if (!local) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile(PPC_TLBIE(%1,%0) : :
					     "r" (tlbrb[k]),
					     "r" (kvm->arch.lpid));
			asm volatile("eieio; tlbsync; ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			for (k = 0; k < n; ++k)
				asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
			asm volatile("ptesync" : : : "memory");
		}

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}
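
/*
 * H_PROTECT: update the pp, key and N protection bits of an existing HPTE
 * (identified by pte_index and, with H_AVPN, the AVPN in 'avpn'), keep the
 * guest's view in the revmap entry in sync, and invalidate the old TLB
 * entry before the updated second doubleword is installed.
 */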

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
		hpte[0] &= ~HPTE_V_HVLOCK;
		return H_NOT_FOUND;
	}

	v = hpte[0];
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
	r = (hpte[1] & ~mask) | bits;

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		rb = compute_tlbie_rb(v, r, pte_index);
		hpte[0] = v & ~HPTE_V_VALID;
		if (global_invalidates(kvm, flags)) {
			while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
				cpu_relax();
			asm volatile("ptesync" : : : "memory");
			asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
				     : : "r" (rb), "r" (kvm->arch.lpid));
			asm volatile("ptesync" : : : "memory");
			kvm->arch.tlbie_lock = 0;
		} else {
			asm volatile("ptesync" : : : "memory");
			asm volatile("tlbiel %0" : : "r" (rb));
			asm volatile("ptesync" : : : "memory");
		}
		/*
		 * If the host has this page as readonly but the guest
		 * wants to make it read/write, reduce the permissions.
		 * Checking the host permissions involves finding the
		 * memslot and then the Linux PTE for the page.
		 */
		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
			unsigned long psize, gfn, hva;
			struct kvm_memory_slot *memslot;
			pgd_t *pgdir = vcpu->arch.pgdir;
			pte_t pte;

			psize = hpte_page_size(v, r);
			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
			memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
			if (memslot) {
				hva = __gfn_to_hva_memslot(memslot, gfn);
				pte = lookup_linux_pte(pgdir, hva, 1, &psize);
				if (pte_present(pte) && !pte_write(pte))
					r = hpte_make_readonly(r);
			}
		}
	}
	hpte[1] = r;
	eieio();
	hpte[0] = v & ~HPTE_V_HVLOCK;
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hpte, v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = hpte[0] & ~HPTE_V_HVLOCK;
		r = hpte[1];
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
			    unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~HPTE_V_VALID;
	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
			   unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
	rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
		cpu_relax();
	asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
		     : : "r" (rb), "r" (kvm->arch.lpid));
	asm volatile("ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};
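
/*
 * Search the hashed page table for an HPTE that maps effective address
 * 'eaddr' under the segment described by 'slb_v', trying the primary hash
 * group and then the secondary.  'valid' selects which of HPTE_V_VALID and
 * HPTE_V_ABSENT entries are acceptable.  On a match the HPTE is left
 * locked (HPTE_V_HVLOCK set) and its global index is returned; -1 means
 * no matching entry was found.
 */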

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	unsigned long *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = hpte[i] & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = hpte[i] & ~HPTE_V_HVLOCK;
			r = hpte[i+1];

			/*
			 * Check the HPTE again, including large page size.
			 * Since we don't currently allow any MPSS (mixed
			 * page-size segment) page sizes, it is sufficient
			 * to check against the actual page size.
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = v;
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * not one of those cases (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	unsigned long *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hpte[0] & ~HPTE_V_HVLOCK;
	r = hpte[1];
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}