/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}
static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
                               uint64_t pte, hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (access_type == MMU_DATA_STORE) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, pde;

    if (*nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1); /* Mask */
        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
    }
    return 0;
}

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    *pte_addr = base_addr + (index * sizeof(pde));
    do {
        int ret;

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits to ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}
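/*
 * Partition-scoped translation: walk the radix tree rooted at the
 * partition table entry (PATE dw0) to convert a guest real address into
 * a host real address. On failure the fault is reported to the guest as
 * an HISI/HDSI when guest_visible is set.
 */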
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, bool guest_visible)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}
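/*
 * Process-scoped translation: index the process table by PID and walk the
 * radix tree it points to, converting an effective address into a guest
 * real address (or directly into a host real address when MSR[HV] is set
 * or the vhyp provides a flat address space).
 */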
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                 5, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (msr_hv || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1); /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                     5, guest_visible);
            if (ret) {
                return ret;
            }

            ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
                                         &nls, g_page_size, &pte, &fault_cause);
            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a 2-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}