/*
 * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
#include "exec/log.h"

/* #define DEBUG_BATS */

#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif

struct mmu_ctx_hash32 {
    hwaddr raddr;      /* Real address */
    int prot;          /* Protection bits */
    int key;           /* Access key */
};

static int ppc_hash32_pp_prot(int key, int pp, int nx)
{
    int prot;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
            prot = PAGE_READ;
            break;

        default:
            abort();
        }
    } else {
        switch (pp) {
        case 0x0:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        default:
            abort();
        }
    }
    if (nx == 0) {
        prot |= PAGE_EXEC;
    }

    return prot;
}
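
/*
 * Compute the page protections for a hash PTE mapping: the access key
 * is taken from the segment register (Kp in problem state, Ks in
 * privileged state) and combined with the PTE's PP bits and the
 * segment's no-execute bit.
 */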
static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
                               target_ulong sr, ppc_hash_pte32_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;

    key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
    pp = pte.pte1 & HPTE32_R_PP;

    return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}

static target_ulong hash32_bat_size(PowerPCCPU *cpu,
                                    target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;

    if ((msr_pr && !(batu & BATU32_VP))
        || (!msr_pr && !(batu & BATU32_VS))) {
        return 0;
    }

    return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}

static int hash32_bat_prot(PowerPCCPU *cpu,
                           target_ulong batu, target_ulong batl)
{
    int pp, prot;

    prot = 0;
    pp = batl & BATL32_PP;
    if (pp != 0) {
        prot = PAGE_READ | PAGE_EXEC;
        if (pp == 0x2) {
            prot |= PAGE_WRITE;
        }
    }
    return prot;
}

static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
                                        target_ulong batu, target_ulong batl)
{
    if (!(batl & BATL32_601_V)) {
        return 0;
    }

    return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}

static int hash32_bat_601_prot(PowerPCCPU *cpu,
                               target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;
    int key, pp;

    pp = batu & BATU32_601_PP;
    if (msr_pr == 0) {
        key = !!(batu & BATU32_601_KS);
    } else {
        key = !!(batu & BATU32_601_KP);
    }
    return ppc_hash32_pp_prot(key, pp, 0);
}

static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
                                    MMUAccessType access_type, int *prot)
{
    CPUPPCState *env = &cpu->env;
    target_ulong *BATlt, *BATut;
    bool ifetch = access_type == MMU_INST_FETCH;
    int i;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             ifetch ? 'I' : 'D', ea);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        target_ulong batu = BATut[i];
        target_ulong batl = BATlt[i];
        target_ulong mask;

        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            mask = hash32_bat_601_size(cpu, batu, batl);
        } else {
            mask = hash32_bat_size(cpu, batu, batl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 ifetch ? 'I' : 'D', i, ea, batu, batl);

        if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
            hwaddr raddr = (batl & mask) | (ea & ~mask);

            if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
                *prot = hash32_bat_601_prot(cpu, batu, batl);
            } else {
                *prot = hash32_bat_prot(cpu, batu, batl);
            }

            return raddr & TARGET_PAGE_MASK;
        }
    }

    /* No hit */
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        target_ulong *BATu, *BATl;
        target_ulong BEPIu, BEPIl, bl;

        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
        for (i = 0; i < 4; i++) {
            BATu = &BATut[i];
            BATl = &BATlt[i];
            BEPIu = *BATu & BATU32_BEPIU;
            BEPIl = *BATu & BATU32_BEPIL;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, ifetch ? 'I' : 'D', i, ea,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
        }
    }
#endif

    return -1;
}
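
/*
 * Handle an access to a direct-store segment (SR bit T set).  Such
 * accesses bypass the hash table: the 601 memory-forced I/O interface
 * (BUID 0x07F) is mapped straight to memory, instruction fetches are
 * rejected, cache management operations are treated as no-ops, and
 * only integer loads/stores go through the key-based protection
 * check below; other access types raise the appropriate exception.
 */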
static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
                                    target_ulong eaddr,
                                    MMUAccessType access_type,
                                    hwaddr *raddr, int *prot,
                                    bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));

    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");

    if (((sr & 0x1FF00000) >> 20) == 0x07f) {
        /*
         * Memory-forced I/O controller interface access
         *
         * If T=1 and BUID=x'07F', the 601 performs a memory access
         * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
         */
        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return true;
    }

    if (access_type == MMU_INST_FETCH) {
        /* No code fetch is allowed in direct-store areas */
        if (guest_visible) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
        }
        return false;
    }

    /*
     * From ppc_cpu_get_phys_page_debug, env->access_type is not set.
     * Assume ACCESS_INT for that case.
     */
    switch (guest_visible ? env->access_type : ACCESS_INT) {
    case ACCESS_INT:
        /* Integer load/store: the only access type allowed */
        break;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        cs->exception_index = POWERPC_EXCP_ALIGN;
        env->error_code = POWERPC_EXCP_ALIGN_FP;
        env->spr[SPR_DAR] = eaddr;
        return false;
    case ACCESS_RES:
        /* lwarx, ldarx or stwcx. */
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06000000;
        } else {
            env->spr[SPR_DSISR] = 0x04000000;
        }
        return false;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * These should be turned into no-ops, and since they already
         * do nothing here, that is easy.
         */
        *raddr = eaddr;
        return true;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06100000;
        } else {
            env->spr[SPR_DSISR] = 0x04100000;
        }
        return false;
    default:
        cpu_abort(cs, "ERROR: insn should not need address translation\n");
    }

    *prot = key ? PAGE_READ | PAGE_WRITE : PAGE_READ;
    if (*prot & prot_for_access_type(access_type)) {
        *raddr = eaddr;
        return true;
    }

    if (guest_visible) {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x0a000000;
        } else {
            env->spr[SPR_DSISR] = 0x08000000;
        }
    }
    return false;
}

hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
    target_ulong mask = ppc_hash32_hpt_mask(cpu);

    return (hash * HASH_PTEG_SIZE_32) & mask;
}
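
/*
 * Search one PTEG (8 PTEs) for a valid entry whose secondary-hash bit
 * matches and whose abbreviated VSID/API fields compare equal to ptem.
 * Returns the matching PTE's offset within the hash table, or -1 if
 * the group contains no match.
 */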
static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pte_offset = pteg_off;
    target_ulong pte0, pte1;
    int i;

    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);

        if ((pte0 & HPTE32_V_VALID)
            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
            && HPTE32_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            return pte_offset;
        }

        pte_offset += HASH_PTE_SIZE_32;
    }

    return -1;
}

static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 6;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 7;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
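
/*
 * Find the hash PTE for (sr, eaddr): compute the primary hash as
 * VSID XOR page index and search that PTEG; if nothing matches,
 * search the secondary PTEG addressed by the one's-complement of the
 * hash.  Returns the PTE offset, or -1 if neither PTEG has a match.
 */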
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                     target_ulong sr, target_ulong eaddr,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint32_t vsid, pgidx, ptem;

    vsid = sr & SR32_VSID;
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ptem = (vsid << 7) | (pgidx >> 10);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                  " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=%" PRIx32 " ptem=%" PRIx32
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                  vsid, ptem, hash);
    pteg_off = get_pteg_offset32(cpu, hash);
    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=%" PRIx32 " ptem=%" PRIx32
                      " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
                      ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
        pteg_off = get_pteg_offset32(cpu, ~hash);
        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
    hwaddr mask = ~TARGET_PAGE_MASK;

    return (rpn & ~mask) | (eaddr & mask);
}
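
/*
 * Translate an effective address through the 32-bit hash MMU: handle
 * real mode, then BATs, then the segment register (direct-store or
 * hashed translation), check permissions, and update the PTE's R and
 * C bits.  On failure, when guest_visible is set, the ISI/DSI
 * exception state is prepared in the CPU before returning false.
 */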
static bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr,
                             MMUAccessType access_type,
                             hwaddr *raddrp, int *psizep, int *protp,
                             bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;
    int need_prot;
    hwaddr raddr;

    /* There are no hash32 large pages. */
    *psizep = TARGET_PAGE_BITS;

    /* 1. Handle real mode accesses */
    if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
        /* Translation is off */
        *raddrp = eaddr;
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return true;
    }

    need_prot = prot_for_access_type(access_type);

    /* 2. Check Block Address Translation entries (BATs) */
    if (env->nb_BATs != 0) {
        raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, protp);
        if (raddr != -1) {
            if (need_prot & ~*protp) {
                if (guest_visible) {
                    if (access_type == MMU_INST_FETCH) {
                        cs->exception_index = POWERPC_EXCP_ISI;
                        env->error_code = 0x08000000;
                    } else {
                        cs->exception_index = POWERPC_EXCP_DSI;
                        env->error_code = 0;
                        env->spr[SPR_DAR] = eaddr;
                        if (access_type == MMU_DATA_STORE) {
                            env->spr[SPR_DSISR] = 0x0a000000;
                        } else {
                            env->spr[SPR_DSISR] = 0x08000000;
                        }
                    }
                }
                return false;
            }
            *raddrp = raddr;
            return true;
        }
    }

    /* 3. Look up the Segment Register */
    sr = env->sr[eaddr >> 28];

    /* 4. Handle direct store segments */
    if (sr & SR32_T) {
        return ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
                                       raddrp, protp, guest_visible);
    }

    /* 5. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
        if (guest_visible) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x10000000;
        }
        return false;
    }

    /* 6. Locate the PTE in the hash table */
    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        if (guest_visible) {
            if (access_type == MMU_INST_FETCH) {
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x40000000;
            } else {
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x42000000;
                } else {
                    env->spr[SPR_DSISR] = 0x40000000;
                }
            }
        }
        return false;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 7. Check access permissions */

    prot = ppc_hash32_pte_prot(cpu, sr, pte);

    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (guest_visible) {
            if (access_type == MMU_INST_FETCH) {
                cs->exception_index = POWERPC_EXCP_ISI;
                env->error_code = 0x08000000;
            } else {
                cs->exception_index = POWERPC_EXCP_DSI;
                env->error_code = 0;
                env->spr[SPR_DAR] = eaddr;
                if (access_type == MMU_DATA_STORE) {
                    env->spr[SPR_DSISR] = 0x0a000000;
                } else {
                    env->spr[SPR_DSISR] = 0x08000000;
                }
            }
        }
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 8. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE32_R_R)) {
        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
    }
    if (!(pte.pte1 & HPTE32_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 9. Determine the real address from the PTE */

    *raddrp = ppc_hash32_pte_raddr(sr, pte, eaddr);
    *protp = prot;
    return true;
}

int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                MMUAccessType access_type, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    int page_size, prot;
    hwaddr raddr;

    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
    if (!ppc_hash32_xlate(cpu, eaddr, access_type, &raddr,
                          &page_size, &prot, true)) {
        return 1;
    }

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}

hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    int psize, prot;
    hwaddr raddr;

    if (!ppc_hash32_xlate(cpu, eaddr, MMU_DATA_LOAD, &raddr,
                          &psize, &prot, false)) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}