/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;
	unsigned long cur_msr = kvmppc_get_msr(vcpu);

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(cur_msr))
		msr |= MSR_TS_S;
	else
		msr |= cur_msr & MSR_TS_MASK;

	kvmppc_set_msr(vcpu, msr);
}

static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk(" %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}
static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
	eaddr &= kvmppc_slb_offset_mask(slb);

	return (eaddr >> VPN_SHIFT) |
	       ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	return kvmppc_slb_calc_vpn(slb, eaddr);
}

static int mmu_pagesize(int mmu_pg)
{
	switch (mmu_pg) {
	case MMU_PAGE_64K:
		return 16;
	case MMU_PAGE_16M:
		return 24;
	}
	return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u64 hash, pteg, htabsize;
	u32 ssize;
	hva_t r;
	u64 vpn;

	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
		vcpu_book3s->sdr1, pteg, slbe->vsid);

	/* When running a PAPR guest, SDR1 contains a HVA address instead
	   of a GPA */
	if (vcpu->arch.papr_enabled)
		r = pteg;
	else
		r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
	switch (slbe->base_page_size) {
	case MMU_PAGE_64K:
		if ((r & 0xf000) == 0x1000)
			return MMU_PAGE_64K;
		break;
	case MMU_PAGE_16M:
		if ((r & 0xff000) == 0)
			return MMU_PAGE_16M;
		break;
	}
	return -1;
}

static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				      struct kvmppc_pte *gpte, bool data,
				      bool iswrite)
{
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	u64 v, r;
	u64 v_val, v_mask;
	u64 eaddr_mask;
	int i;
	u8 pp, key = 0;
	bool found = false;
	bool second = false;
	int pgsize;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;
		gpte->page_size = MMU_PAGE_4K;
		gpte->wimg = HPTE_R_M;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
	v_val = avpn & HPTE_V_AVPN;

	if (slbe->tb)
		v_val |= SLB_VSID_B_1T;
	if (slbe->large)
		v_val |= HPTE_V_LARGE;
	v_val |= HPTE_V_VALID;

	v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
		HPTE_V_SECONDARY;

	pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk_ratelimited(KERN_ERR
			"KVM: Can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		u64 pte0 = be64_to_cpu(pteg[i]);
		u64 pte1 = be64_to_cpu(pteg[i + 1]);

		/* Check all relevant fields of 1st dword */
		if ((pte0 & v_mask) == v_val) {
			/* If large page bit is set, check pgsize encoding */
			if (slbe->large &&
			    (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
				pgsize = decode_pagesize(slbe, pte1);
				if (pgsize < 0)
					continue;
			}
			found = true;
			break;
		}
	}

	if (!found) {
		if (second)
			goto no_page_found;
		v_val |= HPTE_V_SECONDARY;
		second = true;
		goto do_second;
	}

	v = be64_to_cpu(pteg[i]);
	r = be64_to_cpu(pteg[i+1]);
	pp = (r & HPTE_R_PP) | key;
	if (r & HPTE_R_PP0)
		pp |= 8;

	gpte->eaddr = eaddr;
	gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

	eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
	gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
	gpte->page_size = pgsize;
	gpte->may_execute = ((r & HPTE_R_N) ? false : true);
	if (unlikely(vcpu->arch.disable_kernel_nx) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR))
		gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;

	switch (pp) {
	case 0:
	case 1:
	case 2:
	case 6:
		gpte->may_write = true;
		/* fall through */
	case 3:
	case 5:
	case 7:
	case 10:
		gpte->may_read = true;
		break;
	}

	dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
		"-> 0x%lx\n",
		eaddr, avpn, gpte->vpage, gpte->raddr);

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (gpte->may_read && !(r & HPTE_R_R)) {
		/*
		 * Set the accessed flag.
		 * We have to write this back with a single byte write
		 * because another vcpu may be accessing this on
		 * non-PAPR platforms such as mac99, and this is
		 * what real hardware does.
		 */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_R;
		put_user(r >> 8, addr + 6);
	}
	if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
		/* Set the dirty flag */
		/* Use a single byte write */
		char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
		r |= HPTE_R_C;
		put_user(r, addr + 7);
	}

	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

	if (!gpte->may_read || (iswrite && !gpte->may_write))
		return -EPERM;
	return 0;

no_page_found:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr > vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;
	slbe->base_page_size = MMU_PAGE_4K;
	if (slbe->large) {
		if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
			switch (rs & SLB_VSID_LP) {
			case SLB_VSID_LP_00:
				slbe->base_page_size = MMU_PAGE_16M;
				break;
			case SLB_VSID_LP_01:
				slbe->base_page_size = MMU_PAGE_64K;
				break;
			}
		} else
			slbe->base_page_size = MMU_PAGE_16M;
	}

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr > vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;
	u64 seg_size;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;
	slbe->orige = 0;
	slbe->origv = 0;

	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;
	long i;
	struct kvm_vcpu *v;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	/*
	 * The tlbie instruction changed behaviour starting with
	 * POWER6.  POWER6 and later don't have the large page flag
	 * in the instruction but in the RB value, along with bits
	 * indicating page and segment sizes.
	 */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
		/* POWER6 or later */
		if (va & 1) {		/* L bit */
			if ((va & 0xf000) == 0x1000)
				mask = 0xFFFFFFFF0ULL;	/* 64k page */
			else
				mask = 0xFFFFFF000ULL;	/* 16M page */
		}
	} else {
		/* older processors, e.g. PPC970 */
		if (large)
			mask = 0xFFFFFF000ULL;
	}
	/* flush this VA on all vcpus */
	kvm_for_each_vcpu(i, v, vcpu->kvm)
		kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
		(mp_ea >> SID_SHIFT) == esid;
}
#endif

static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;
	int pagesize = MMU_PAGE_64K;
	u64 msr = kvmppc_get_msr(vcpu);

	if (msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb) {
			gvsid = slb->vsid;
			pagesize = slb->base_page_size;
			if (slb->tb) {
				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
				gvsid |= VSID_1T;
			}
		}
	}

	switch (msr & (MSR_DR|MSR_IR)) {
	case 0:
		gvsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		gvsid |= VSID_REAL_IR;
		break;
	case MSR_DR:
		gvsid |= VSID_REAL_DR;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		break;
	default:
		BUG();
		break;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Mark this as a 64k segment if the host is using
	 * 64k pages, the host MMU supports 64k pages and
	 * the guest segment page size is >= 64k,
	 * but not if this segment contains the magic page.
	 */
	if (pagesize >= MMU_PAGE_64K &&
	    mmu_psize_defs[MMU_PAGE_64K].shift &&
	    !segment_contains_magic_page(vcpu, esid))
		gvsid |= VSID_64K;
#endif

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	*vsid = gvsid;
	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}