/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>

/* The Power architecture requires the HPT to be at least 256 kiB and at most 64 TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * By byte-swapping all the data we apply to the HPTE we remain
	 * correct.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
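
/*
 * Minimal usage sketch for the locking helpers above (illustrative only;
 * the real users live in arch/powerpc/kvm/book3s_hv_rm_mmu.c and
 * arch/powerpc/kvm/book3s_64_mmu_hv.c).  A caller typically spins until
 * the lock bit is taken, inspects or updates the HPTE, then drops the
 * lock with one of the unlock helpers:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... inspect or modify the HPTE ...
 *	__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */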

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;		/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}
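
/*
 * Worked example of the encoding used above: for a 64kB base page that
 * actually maps 16MB (case 8 with lphi == 0), kvmppc_hpte_page_shifts()
 * returns (24 << 8) + 16 = 0x1810, so kvmppc_hpte_base_page_shift() gives
 * 16 and kvmppc_hpte_actual_page_shift() gives 24.  Values below 0x100
 * mean the base and actual page sizes are the same.  Going the other way,
 * kvmppc_pgsize_lp_encoding(16, 24) returns the LP value 8 that decodes
 * to the same pair.
 */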

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of va.
	 * v has the top two bits covering segment size, hence move
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has the lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has the lower 23 bits cleared.  We need to derive
	 * those from the pteg index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low using the reverse of the hash.
	 * In v we have the va with 23 bits dropped and then left shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits.  To find the vsid we therefore
	 * right shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/* AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the LP bits.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * AVAL field is bits 58..(77 - base_page_shift) of va.
		 * We only have space for bits 58..64; the missing bits
		 * should be zero filled.  The +1 takes care of the L bit
		 * shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure the hptel is
	 * also cache-inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME: is this OK for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}
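
/*
 * For reference (WIMG bits as tested above): W = write-through, I = cache
 * inhibited, M = memory coherence required, G = guarded.  A normal
 * cacheable guest mapping is expected to have just M set; a device
 * mapping is expected to have I (and usually G) set.  The W|I|M
 * combination is the SAO (strong access ordering) mode handled at the
 * top of hpte_cache_flags_ok(), which POWER7 and later (CPU_FTR_ARCH_206)
 * treat as ordinary cacheable memory for the purpose of this check.
 */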

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until H_PAGE_BUSY is clear, then update the PTE
		 * atomically below.
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the PTE is not present, return an empty PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
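
/*
 * Background for is_vrma_hpte() above: the VRMA (virtual real mode area)
 * is the mapping used for guest accesses made with the MMU off.  Its
 * HPTEs are normally installed with the reserved VRMA_VSID and 1TB
 * segment size, which is what this test recognises by comparing the
 * segment-size and AVA bits of the first HPTE doubleword.
 */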

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */