/*
 * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
#include "exec/log.h"

/* #define DEBUG_BATS */

#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif

struct mmu_ctx_hash32 {
    hwaddr raddr;      /* Real address */
    int prot;          /* Protection bits */
    int key;           /* Access key */
};

/*
 * Convert the PP (page protection) bits and the access key into
 * PAGE_* protection flags.  With key == 0, PP values 0-2 allow
 * read/write and PP 3 is read-only; with key == 1, PP 0 denies all
 * access, PP 1 and 3 are read-only and PP 2 allows read/write.
 */
static int ppc_hash32_pp_prot(int key, int pp, int nx)
{
    int prot;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
            prot = PAGE_READ;
            break;

        default:
            abort();
        }
    } else {
        switch (pp) {
        case 0x0:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        default:
            abort();
        }
    }
    if (nx == 0) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
                               target_ulong sr, ppc_hash_pte32_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;

    key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
    pp = pte.pte1 & HPTE32_R_PP;

    return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}
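/*
 * Illustrative note (editor's addition, not part of the original code):
 * hash32_bat_size() below turns the BAT block-length field into an
 * address mask.  Assuming the usual field layouts (BATU32_BEPI ==
 * 0xfffe0000, BATU32_BL == 0x00001ffc), a maximal 256 MiB block has
 * BL all ones, so (BL << 15) == 0x0ffe0000 and the returned mask is
 * 0xf0000000: only the top four EA bits are compared against BEPI.
 * The smallest 128 KiB block has BL == 0 and yields 0xfffe0000, i.e.
 * all fifteen BEPI bits must match.  A zero return means the BAT is
 * not valid at the current privilege level.
 */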
static target_ulong hash32_bat_size(PowerPCCPU *cpu,
                                    target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;

    if ((msr_pr && !(batu & BATU32_VP))
        || (!msr_pr && !(batu & BATU32_VS))) {
        return 0;
    }

    return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}

static int hash32_bat_prot(PowerPCCPU *cpu,
                           target_ulong batu, target_ulong batl)
{
    int pp, prot;

    prot = 0;
    pp = batl & BATL32_PP;
    if (pp != 0) {
        prot = PAGE_READ | PAGE_EXEC;
        if (pp == 0x2) {
            prot |= PAGE_WRITE;
        }
    }
    return prot;
}

static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
                                        target_ulong batu, target_ulong batl)
{
    if (!(batl & BATL32_601_V)) {
        return 0;
    }

    return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}

static int hash32_bat_601_prot(PowerPCCPU *cpu,
                               target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;
    int key, pp;

    pp = batu & BATU32_601_PP;
    if (msr_pr == 0) {
        key = !!(batu & BATU32_601_KS);
    } else {
        key = !!(batu & BATU32_601_KP);
    }
    return ppc_hash32_pp_prot(key, pp, 0);
}

static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
                                    MMUAccessType access_type, int *prot)
{
    CPUPPCState *env = &cpu->env;
    target_ulong *BATlt, *BATut;
    bool ifetch = access_type == MMU_INST_FETCH;
    int i;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             ifetch ? 'I' : 'D', ea);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        target_ulong batu = BATut[i];
        target_ulong batl = BATlt[i];
        target_ulong mask;

        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            mask = hash32_bat_601_size(cpu, batu, batl);
        } else {
            mask = hash32_bat_size(cpu, batu, batl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 ifetch ? 'I' : 'D', i, ea, batu, batl);

        if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
            hwaddr raddr = (batl & mask) | (ea & ~mask);

            if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
                *prot = hash32_bat_601_prot(cpu, batu, batl);
            } else {
                *prot = hash32_bat_prot(cpu, batu, batl);
            }

            return raddr & TARGET_PAGE_MASK;
        }
    }

    /* No hit */
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        target_ulong *BATu, *BATl;
        target_ulong BEPIu, BEPIl, bl;

        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
        for (i = 0; i < 4; i++) {
            BATu = &BATut[i];
            BATl = &BATlt[i];
            BEPIu = *BATu & BATU32_BEPIU;
            BEPIl = *BATu & BATU32_BEPIL;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, ifetch ? 'I' : 'D', i, ea,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
        }
    }
#endif

    return -1;
}
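/*
 * Descriptive note (editor's addition): ppc_hash32_direct_store()
 * handles segments with SR[T] set, which bypass the hash page table
 * entirely.  Only plain integer loads and stores are allowed to
 * complete; instruction fetches, floating-point accesses, reservation
 * instructions, eciwx/ecowx and cache operations either raise an
 * exception or are turned into no-ops, as the switch below spells out.
 * The special BUID 0x07F case is the 601's memory-forced I/O controller
 * interface, which maps straight to a real address with no protection
 * checks.
 */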
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
                                   target_ulong eaddr,
                                   MMUAccessType access_type,
                                   hwaddr *raddr, int *prot)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));

    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");

    if ((sr & 0x1FF00000) >> 20 == 0x07f) {
        /*
         * Memory-forced I/O controller interface access
         *
         * If T=1 and BUID=x'07F', the 601 performs a memory access
         * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
         */
        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (access_type == MMU_INST_FETCH) {
        /* No code fetch is allowed in direct-store areas */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    switch (env->access_type) {
    case ACCESS_INT:
        /* Integer load/store : only access allowed */
        break;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        cs->exception_index = POWERPC_EXCP_ALIGN;
        env->error_code = POWERPC_EXCP_ALIGN_FP;
        env->spr[SPR_DAR] = eaddr;
        return 1;
    case ACCESS_RES:
        /* lwarx, ldarx or stwcx. */
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06000000;
        } else {
            env->spr[SPR_DSISR] = 0x04000000;
        }
        return 1;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction a no-op.  As it already is a
         * no-op, there is nothing more to do here.
         */
        *raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06100000;
        } else {
            env->spr[SPR_DSISR] = 0x04100000;
        }
        return 1;
    default:
        cpu_abort(cs, "ERROR: instruction should not need "
                  "address translation\n");
    }
    if ((access_type == MMU_DATA_STORE || key != 1) &&
        (access_type == MMU_DATA_LOAD || key != 0)) {
        *raddr = eaddr;
        return 0;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x0a000000;
        } else {
            env->spr[SPR_DSISR] = 0x08000000;
        }
        return 1;
    }
}

hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
    target_ulong mask = ppc_hash32_hpt_mask(cpu);

    return (hash * HASH_PTEG_SIZE_32) & mask;
}

static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pte_offset = pteg_off;
    target_ulong pte0, pte1;
    int i;

    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);

        if ((pte0 & HPTE32_V_VALID)
            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
            && HPTE32_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            return pte_offset;
        }

        pte_offset += HASH_PTE_SIZE_32;
    }

    return -1;
}
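/*
 * Illustrative note (editor's addition): each HPTE is 8 bytes, with
 * pte0 at offset 0 and pte1 at offset 4, stored big-endian.  Assuming
 * the usual bit assignments (HPTE32_R_R == 0x00000100, HPTE32_R_C ==
 * 0x00000080), the Referenced bit lives in byte 6 of the entry and the
 * Changed bit in byte 7, which is why the two helpers below can update
 * each of them with a single byte store, mirroring the non-atomic byte
 * update real hardware performs.
 */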
static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 6;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 7;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
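/*
 * Worked example (editor's addition, values are hypothetical): for an
 * effective address of 0x12345678 the segment register is sr[1]; with
 * TARGET_PAGE_BITS == 12 the page index is
 *     pgidx = (0x12345678 & 0x0fffffff) >> 12 = 0x02345,
 * the primary hash is vsid ^ 0x02345, and the abbreviated tag is
 *     ptem = (vsid << 7) | (0x02345 >> 10) = (vsid << 7) | 0x8.
 * The primary PTEG lives at (hash * HASH_PTEG_SIZE_32) & hpt_mask
 * within the hash table; if no match is found there, the search is
 * retried in the secondary PTEG derived from ~hash, as implemented
 * below.
 */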
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                     target_ulong sr, target_ulong eaddr,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint32_t vsid, pgidx, ptem;

    vsid = sr & SR32_VSID;
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ptem = (vsid << 7) | (pgidx >> 10);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                  " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=%" PRIx32 " ptem=%" PRIx32
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                  vsid, ptem, hash);
    pteg_off = get_pteg_offset32(cpu, hash);
    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=%" PRIx32 " api=%" PRIx32
                      " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
                      ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
        pteg_off = get_pteg_offset32(cpu, ~hash);
        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
    hwaddr mask = ~TARGET_PAGE_MASK;

    return (rpn & ~mask) | (eaddr & mask);
}

int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;
    int need_prot;
    MMUAccessType access_type;
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    access_type = rwx;
    need_prot = prot_for_access_type(access_type);

    /* 1. Handle real mode accesses */
    if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
        /* Translation is off */
        raddr = eaddr;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Check Block Address Translation entries (BATs) */
    if (env->nb_BATs != 0) {
        raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, &prot);
        if (raddr != -1) {
            if (need_prot & ~prot) {
                if (access_type == MMU_INST_FETCH) {
                    cs->exception_index = POWERPC_EXCP_ISI;
                    env->error_code = 0x08000000;
                } else {
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0a000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                return 1;
            }

            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        }
    }

    /* 3. Look up the Segment Register */
    sr = env->sr[eaddr >> 28];

    /* 4. Handle direct store segments */
    if (sr & SR32_T) {
        if (ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
                                    &raddr, &prot) == 0) {
            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        } else {
            return 1;
        }
    }

    /* 5. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 6. Locate the PTE in the hash table */
    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }

        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 7. Check access permissions */

    prot = ppc_hash32_pte_prot(cpu, sr, pte);

    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x0a000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 8. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE32_R_R)) {
        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
    }
    if (!(pte.pte1 & HPTE32_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 9. Determine the real address from the PTE */

    raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
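/*
 * Note (editor's addition): the debug lookup below performs the same
 * translation steps but without side effects: it raises no exceptions
 * and does not touch the PTE referenced/changed bits.  It is intended
 * for out-of-band physical address queries (e.g. from the gdb stub),
 * and simply returns -1 for direct-store segments and untranslatable
 * addresses.
 */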
hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;

    if (msr_dr == 0) {
        /* Translation is off */
        return eaddr;
    }

    if (env->nb_BATs != 0) {
        hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, MMU_DATA_LOAD, &prot);
        if (raddr != -1) {
            return raddr;
        }
    }

    sr = env->sr[eaddr >> 28];

    if (sr & SR32_T) {
        /* FIXME: Add suitable debug support for Direct Store segments */
        return -1;
    }

    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK;
}