/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	cpumask_t cpu_in_guest;
	short prev_cpu[NR_CPUS];
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL
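
/*
 * Illustrative sketch (not part of the original header): how the fields
 * above pack into one nested rmap value. For example, an entry for lpid 5
 * covering guest 4k frame 0x1234 could be built and decoded roughly as:
 *
 *	u64 rmap = ((u64)5 << RMAP_NESTED_LPID_SHIFT) | ((u64)0x1234 << 12);
 *	int lpid = (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *	u64 gpa  = rmap & RMAP_NESTED_GPA_MASK;
 *
 * The single-entry flag (bit 0) is only set when the value is stored
 * directly in the memslot rmap rather than in an llist node.
 */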

/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			      \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	      \
	     (node) &&							      \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?   \
			  ((u64) (node)) : ((pos)->rmap))) &&		      \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
			 ((struct llist_node *) ((pos) = NULL)) :	      \
			 (pos)->list.next)), true);			      \
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* Power architecture requires the HPT to be at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
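
/*
 * Illustrative sketch (not part of the original header): a caller would
 * typically take the lock bit by spinning on try_lock_hpte(), which only
 * succeeds when none of the given bits were already set, and drop it with
 * unlock_hpte(), e.g. roughly:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]);
 *	... examine or modify the HPTE under the lock ...
 *	unlock_hpte(hptep, v);	- clears HPTE_V_HVLOCK with a release barrier
 */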

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
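
/*
 * Worked example (illustrative, not part of the original header): when the
 * base and actual page sizes differ, kvmppc_hpte_page_shifts() packs the
 * actual page shift in bits 8..15 and the base page shift in bits 0..7.
 * A 16MB actual page backed by 4kB base pages uses LP encoding 0x38
 * (kvmppc_pgsize_lp_encoding(12, 24) == 0x38); with that value in the HPTE,
 * kvmppc_hpte_page_shifts() returns (24 << 8) + 12, so
 * kvmppc_hpte_base_page_shift() yields 12, kvmppc_hpte_actual_page_shift()
 * yields 24, and kvmppc_actual_pgsz() reports 16MB.
 */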

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering the segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared. We need to derive
	 * those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of hashing.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. Now, to find the vsid, we need to
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * remaining bits of AVA/LP fields
		 * Also contain the rr bits of LP
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field: bits 58..(77 - base_page_shift) of the va.
		 * We have space for bits 58..64; missing bits should be
		 * zero filled. The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
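
/*
 * Illustrative note (not part of the original header): hpte_make_readonly()
 * downgrades the PP bits so that hpte_is_writable() is false afterwards,
 * e.g. roughly:
 *
 *	ptel = hpte_make_readonly(ptel);
 *	... hpte_is_writable(ptel) now returns 0 ...
 */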

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache inhibited, make sure the hptel
	 * also has cache inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear before proceeding.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return a zero pte */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
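
/*
 * Illustrative sketch (not part of the original header): the rmap lock bit
 * is used in the usual acquire/release pairing, e.g. roughly:
 *
 *	lock_rmap(rmapp);
 *	... examine or modify the rmap chain under the lock ...
 *	unlock_rmap(rmapp);
 */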

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, unsigned int lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				unsigned long gpa, unsigned long hpa,
				unsigned long nbytes);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */