/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

/* Radix Partition Table Entry Fields */
#define PATE1_R_PRTB            0x0FFFFFFFFFFFF000
#define PATE1_R_PRTS            0x000000000000001F

/* Radix Process Table Entry Fields */
#define PRTBE_R_GET_RTS(rts) \
    ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
#define PRTBE_R_RPDB            0x0FFFFFFFFFFFFF00
#define PRTBE_R_RPDS            0x000000000000001F

/* Radix Page Directory/Table Entry Fields */
#define R_PTE_VALID             0x8000000000000000
#define R_PTE_LEAF              0x4000000000000000
#define R_PTE_SW0               0x2000000000000000
#define R_PTE_RPN               0x01FFFFFFFFFFF000
#define R_PTE_SW1               0x0000000000000E00
#define R_GET_SW(sw)            (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
#define R_PTE_R                 0x0000000000000100
#define R_PTE_C                 0x0000000000000080
#define R_PTE_ATT               0x0000000000000030
#define R_PTE_ATT_NORMAL        0x0000000000000000
#define R_PTE_ATT_SAO           0x0000000000000010
#define R_PTE_ATT_NI_IO         0x0000000000000020
#define R_PTE_ATT_TOLERANT_IO   0x0000000000000030
#define R_PTE_EAA_PRIV          0x0000000000000008
#define R_PTE_EAA_R             0x0000000000000004
#define R_PTE_EAA_RW            0x0000000000000002
#define R_PTE_EAA_X             0x0000000000000001
#define R_PDE_NLB               PRTBE_R_RPDB
#define R_PDE_NLS               PRTBE_R_RPDS
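
/*
 * Worked example (illustrative, not from the ISA text): the 5-bit RTS
 * (Radix Tree Size) field is split in the doubleword, so PRTBE_R_GET_RTS()
 * reassembles it from two pieces. For the usual 52-bit effective-address
 * space the encoded value is 52 - 31 = 21 = 0b10101: the top two bits
 * (0b10) sit where ((rts >> 58) & 0x18) finds them (doubleword bits 62:61)
 * and the bottom three (0b101) where ((rts >> 5) & 0x7) finds them
 * (bits 7:5), giving (0x10 | 0x5) + 31 = 52.
 */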

static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    env->error_code = 0;
    if (cause & DSISR_PRTABLE_FAULT) {
        /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
        env->error_code = access_type;
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        break;
    default:
        g_assert_not_reached();
    }
}

static int ppc_radix64_get_prot_eaa(uint64_t pte)
{
    return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
           (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
           (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
}

static int ppc_radix64_get_prot_amr(const PowerPCCPU *cpu)
{
    const CPUPPCState *env = &cpu->env;
    int amr = env->spr[SPR_AMR] >> 62;   /* We only care about key0 AMR63:62 */
    int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */

    return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
           (amr & 0x1 ? 0 : PAGE_READ) |
           (iamr & 0x1 ? 0 : PAGE_EXEC);
}
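
/*
 * Illustrative example: only key 0 of AMR/IAMR is honored above. With
 * SPR_AMR = 0xC000000000000000, amr = 0b11, so both the write-deny bit (0x2)
 * and the read-deny bit (0x1) are set and the function can return at most
 * PAGE_EXEC (which is in turn denied if IAMR bit 0x1 is set).
 */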

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (!check_prot_access_type(*prot, access_type)) {
        /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}

static int ppc_radix64_check_rc(MMUAccessType access_type, uint64_t pte)
{
    switch (access_type) {
    case MMU_DATA_STORE:
        if (!(pte & R_PTE_C)) {
            break;
        }
        /* fall through */
    case MMU_INST_FETCH:
    case MMU_DATA_LOAD:
        if (!(pte & R_PTE_R)) {
            break;
        }

        /* R/C bits are already set appropriately for this access */
        return 0;
    }

    return 1;
}
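
/*
 * Illustrative example: a store that finds R_PTE_C clear, or any access that
 * finds R_PTE_R clear, makes ppc_radix64_check_rc() return 1. This file does
 * not update R/C itself; the callers below raise a storage interrupt with
 * DSISR_ATOMIC_RC so software can set the bits and retry the access.
 */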
static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    bool ret;

    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPUs that support a different Radix MMU configuration will need their
     * own implementation.
     */
    switch (level) {
    case 0:     /* Root Page Dir */
        ret = psize == 52 && nls == 13;
        break;
    case 1:
    case 2:
        ret = nls == 9;
        break;
    case 3:
        ret = nls == 9 || nls == 5;
        break;
    default:
        ret = false;
    }

    if (unlikely(!ret)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
                      "level %d size %d nls %"PRIu64"\n",
                      level, psize, nls);
    }
    return ret;
}

static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) {          /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls);       /* Shift */
        index &= ((1UL << *nls) - 1);           /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: misaligned page dir/table base: 0x%" PRIx64
                          " page dir size: 0x%" PRIx64 "\n",
                          __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls);    /* Shift */
    index &= ((1UL << nls) - 1);        /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: misaligned page dir base: 0x%" PRIx64
                      " page dir size: 0x%" PRIx64 "\n",
                      __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* OR high bits of rpn with low bits of eaddr to form the whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}
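
/*
 * Worked example (illustrative): with the standard POWER9/POWER10
 * configuration accepted by ppc_radix64_is_valid_level() (RTS = 52,
 * root nls = 13, then 9/9/9), *psize shrinks as 52 -> 39 -> 30 -> 21 -> 12,
 * so a leaf at level 3 maps a 4K page; nls = 5 at level 3 instead yields
 * 64K pages, and a leaf at level 2 or level 1 yields 2M or 1G pages
 * respectively. At each step the next index is
 * (eaddr >> (*psize - *nls)) & ((1UL << *nls) - 1).
 */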
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType orig_access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, uint64_t lpid,
                                              bool guest_visible)
{
    MMUAccessType access_type = orig_access_type;
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    if (pde_addr) {
        /*
         * Translation of process-scoped tables/directories is performed as
         * a read-access.
         */
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, orig_access_type,
                                  eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        if (ppc_radix64_check_rc(access_type, pte)) {
            /*
             * Per ISA 3.1 Book III, 7.5.3 and 7.5.5, failure to set R/C during
             * partition-scoped translation when effLPID = 0 results in normal
             * (non-Hypervisor) Data and Instruction Storage Interrupts
             * respectively.
             *
             * ISA 3.0 is ambiguous about this, but tests on POWER9 hardware
             * seem to exhibit the same behavior.
             */
            if (lpid > 0) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
                                      DSISR_ATOMIC_RC);
            } else {
                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            }
            return 1;
        }
    }

    return 0;
}
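
/*
 * Illustrative note: the pde_addr = true case above is exactly how the
 * process-scoped walk further below translates its own table accesses. A
 * fault on such an access is reported with DSISR_PRTABLE_FAULT and, per the
 * comment in ppc_radix64_raise_hsi(), the originating access type is
 * preserved in error_code while the access itself is treated as a load.
 */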
/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}
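
/*
 * Worked example for the process-table lookup below (illustrative): with
 * PRTS = 12 the table is 1 << (12 + 12) = 16MB. Entries are
 * sizeof(struct prtb_entry) = 16 bytes, so PIDs 0 .. 2^20 - 1 are valid and
 * anything beyond that faults with DSISR_NOPTE.
 */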
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, uint64_t lpid,
                                            bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation.
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        /* mmu_idx is 5 because we're translating from hypervisor scope */
        ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                 prtbe_addr, pate, &h_raddr,
                                                 &h_prot, &h_page_size, true,
                                                 5, lpid, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls);  /* Shift */
        index &= ((1UL << nls) - 1);                             /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            /* mmu_idx is 5 because we're translating from hypervisor scope */
            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     pte_addr, pate, &h_raddr,
                                                     &h_prot, &h_page_size,
                                                     true, 5, lpid,
                                                     guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* OR high bits of rpn with low bits of eaddr to form the real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        /* R/C bits not appropriately set for access */
        if (ppc_radix64_check_rc(access_type, pte)) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            return 1;
        }
    }

    return 0;
}
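
/*
 * Illustrative cost note: in the nested (!HV, non-flat) path above, the
 * process-table entry and every page-directory access of the guest's walk
 * are themselves partition-scoped translated, so one 4-level process-scoped
 * walk can trigger up to five partition-scoped walks (one for the prtbe
 * plus one per level), each of which is itself up to four lookups deep.
 */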
/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        if (!cpu->vhyp_class->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, lpid,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, lpid,
                                                     guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}
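
/*
 * Illustrative example of the real-mode path above: with relocation off in
 * HV mode and SPR_HRMOR = 0x40000000, an access to eaddr
 * 0x0000000000001000 resolves to raddr 0x40001000, while eaddr
 * 0x8000000000001000 (top bit set) bypasses HRMOR and resolves to
 * 0x0000000000001000 after the top four bits are masked off.
 */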
bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}
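
/*
 * Summary note (a sketch of the composition, derived from the code above):
 * the two translation stages are combined through *psizep and *protp in
 * ppc_radix64_xlate_impl(). The effective page size is the MIN of the
 * process- and partition-scoped sizes, and the protection is the AND of
 * both stages' permissions.
 */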