/*
 * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "internal.h"
#include "mmu-hash32.h"
#include "exec/log.h"

/* #define DEBUG_BATS */

#ifdef DEBUG_BATS
#  define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_BATS(...) do { } while (0)
#endif

struct mmu_ctx_hash32 {
    hwaddr raddr;      /* Real address             */
    int prot;          /* Protection bits          */
    int key;           /* Access key               */
};

static int ppc_hash32_pp_prot(int key, int pp, int nx)
{
    int prot;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
            prot = PAGE_READ;
            break;

        default:
            abort();
        }
    } else {
        switch (pp) {
        case 0x0:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        default:
            abort();
        }
    }
    if (nx == 0) {
        prot |= PAGE_EXEC;
    }

    return prot;
}
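
/*
 * Recap of the protection table implemented by ppc_hash32_pp_prot()
 * above, for the 32-bit hashed page table PP encoding:
 *
 *   key = 0: PP 00/01/10 -> read/write,  PP 11       -> read-only
 *   key = 1: PP 00       -> no access,   PP 01/11    -> read-only,
 *            PP 10       -> read/write
 *
 * PAGE_EXEC is then granted on top of that unless the segment is
 * marked no-execute (nx != 0).
 */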
static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
                               target_ulong sr, ppc_hash_pte32_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;

    key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
    pp = pte.pte1 & HPTE32_R_PP;

    return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}

static target_ulong hash32_bat_size(PowerPCCPU *cpu,
                                    target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;

    if ((msr_pr && !(batu & BATU32_VP))
        || (!msr_pr && !(batu & BATU32_VS))) {
        return 0;
    }

    return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}

static int hash32_bat_prot(PowerPCCPU *cpu,
                           target_ulong batu, target_ulong batl)
{
    int pp, prot;

    prot = 0;
    pp = batl & BATL32_PP;
    if (pp != 0) {
        prot = PAGE_READ | PAGE_EXEC;
        if (pp == 0x2) {
            prot |= PAGE_WRITE;
        }
    }
    return prot;
}

static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
                                        target_ulong batu, target_ulong batl)
{
    if (!(batl & BATL32_601_V)) {
        return 0;
    }

    return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}

static int hash32_bat_601_prot(PowerPCCPU *cpu,
                               target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;
    int key, pp;

    pp = batu & BATU32_601_PP;
    if (msr_pr == 0) {
        key = !!(batu & BATU32_601_KS);
    } else {
        key = !!(batu & BATU32_601_KP);
    }
    return ppc_hash32_pp_prot(key, pp, 0);
}

static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea,
                                    MMUAccessType access_type, int *prot)
{
    CPUPPCState *env = &cpu->env;
    target_ulong *BATlt, *BATut;
    bool ifetch = access_type == MMU_INST_FETCH;
    int i;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             ifetch ? 'I' : 'D', ea);
    if (ifetch) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        target_ulong batu = BATut[i];
        target_ulong batl = BATlt[i];
        target_ulong mask;

        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            mask = hash32_bat_601_size(cpu, batu, batl);
        } else {
            mask = hash32_bat_size(cpu, batu, batl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 ifetch ? 'I' : 'D', i, ea, batu, batl);

        if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
            hwaddr raddr = (batl & mask) | (ea & ~mask);

            if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
                *prot = hash32_bat_601_prot(cpu, batu, batl);
            } else {
                *prot = hash32_bat_prot(cpu, batu, batl);
            }

            return raddr & TARGET_PAGE_MASK;
        }
    }

    /* No hit */
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        target_ulong *BATu, *BATl;
        target_ulong BEPIu, BEPIl, bl;

        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
        for (i = 0; i < 4; i++) {
            BATu = &BATut[i];
            BATl = &BATlt[i];
            BEPIu = *BATu & BATU32_BEPIU;
            BEPIl = *BATu & BATU32_BEPIL;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, ifetch ? 'I' : 'D', i, ea,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
        }
    }
#endif

    return -1;
}
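
/*
 * BAT matching recap: the BL field of the upper BAT register selects
 * how many effective address bits take part in the comparison.
 * hash32_bat_size() turns BL into a mask over the BEPI field, so BL = 0
 * gives a 128 KiB block (mask 0xfffe0000) and an all-ones BL gives a
 * 256 MiB block.  On a hit, the translated address keeps the unmasked
 * low bits of the EA and takes the high bits from the block physical
 * number held in the lower BAT register.
 */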
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
                                   target_ulong eaddr,
                                   MMUAccessType access_type,
                                   hwaddr *raddr, int *prot)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));

    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");

    if ((sr & 0x1FF00000) >> 20 == 0x07f) {
        /*
         * Memory-forced I/O controller interface access
         *
         * If T=1 and BUID=x'07F', the 601 performs a memory access
         * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
         */
        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (access_type == MMU_INST_FETCH) {
        /* No code fetch is allowed in direct-store areas */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    switch (env->access_type) {
    case ACCESS_INT:
        /* Integer load/store : only access allowed */
        break;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        cs->exception_index = POWERPC_EXCP_ALIGN;
        env->error_code = POWERPC_EXCP_ALIGN_FP;
        env->spr[SPR_DAR] = eaddr;
        return 1;
    case ACCESS_RES:
        /* lwarx, ldarx or stwcx. */
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06000000;
        } else {
            env->spr[SPR_DSISR] = 0x04000000;
        }
        return 1;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction do a no-op. As it already does
         * a no-op, it's quite easy :-)
         */
        *raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x06100000;
        } else {
            env->spr[SPR_DSISR] = 0x04100000;
        }
        return 1;
    default:
        cpu_abort(cs, "ERROR: instruction should not need "
                  "address translation\n");
    }
    if ((access_type == MMU_DATA_STORE || key != 1) &&
        (access_type == MMU_DATA_LOAD || key != 0)) {
        *raddr = eaddr;
        return 0;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (access_type == MMU_DATA_STORE) {
            env->spr[SPR_DSISR] = 0x0a000000;
        } else {
            env->spr[SPR_DSISR] = 0x08000000;
        }
        return 1;
    }
}

hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
    target_ulong mask = ppc_hash32_hpt_mask(cpu);

    return (hash * HASH_PTEG_SIZE_32) & mask;
}

static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pte_offset = pteg_off;
    target_ulong pte0, pte1;
    int i;

    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);

        if ((pte0 & HPTE32_V_VALID)
            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
            && HPTE32_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            return pte_offset;
        }

        pte_offset += HASH_PTE_SIZE_32;
    }

    return -1;
}
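
/*
 * Hash recap for ppc_hash32_pteg_search() above and
 * ppc_hash32_htab_lookup() below: with 256 MiB segments and 4 KiB pages
 * the primary hash is VSID ^ page_index and the secondary hash is its
 * ones' complement; each hash selects a PTEG of HPTES_PER_GROUP PTEs
 * which is then scanned linearly.  The match tag (ptem) packs the VSID
 * with the abbreviated page index (the top 6 bits of the page index),
 * which is all that fits in PTE word 0.  As a small illustration
 * (values chosen arbitrarily), VSID 0x10 and page index 0x3 hash to
 * 0x13 for the primary group and ~0x13 for the secondary one, each then
 * scaled by HASH_PTEG_SIZE_32 and masked with the hash table mask by
 * get_pteg_offset32().
 */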
static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 6;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 7;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}

static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                     target_ulong sr, target_ulong eaddr,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint32_t vsid, pgidx, ptem;

    vsid = sr & SR32_VSID;
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ptem = (vsid << 7) | (pgidx >> 10);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                  " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=%" PRIx32 " ptem=%" PRIx32
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                  vsid, ptem, hash);
    pteg_off = get_pteg_offset32(cpu, hash);
    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=%" PRIx32 " api=%" PRIx32
                      " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
                      ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
        pteg_off = get_pteg_offset32(cpu, ~hash);
        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
    hwaddr mask = ~TARGET_PAGE_MASK;

    return (rpn & ~mask) | (eaddr & mask);
}
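
/*
 * ppc_hash32_handle_mmu_fault() below ties the pieces together:
 * real-mode pass-through, BAT lookup, segment register lookup,
 * direct-store handling, the segment-level no-execute check, the hash
 * table search, permission checking, R/C bit maintenance and finally
 * the TLB fill.  The resulting real address simply splices the RPN from
 * PTE word 1 onto the page offset of the effective address; for
 * example, with 4 KiB pages an RPN of 0x12345000 and an EA ending in
 * 0x678 yield the real address 0x12345678.
 */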
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;
    int need_prot;
    MMUAccessType access_type;
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    access_type = rwx;
    need_prot = prot_for_access_type(access_type);

    /* 1. Handle real mode accesses */
    if (access_type == MMU_INST_FETCH ? !msr_ir : !msr_dr) {
        /* Translation is off */
        raddr = eaddr;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Check Block Address Translation entries (BATs) */
    if (env->nb_BATs != 0) {
        raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, &prot);
        if (raddr != -1) {
            if (need_prot & ~prot) {
                if (access_type == MMU_INST_FETCH) {
                    cs->exception_index = POWERPC_EXCP_ISI;
                    env->error_code = 0x08000000;
                } else {
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (access_type == MMU_DATA_STORE) {
                        env->spr[SPR_DSISR] = 0x0a000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                return 1;
            }

            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        }
    }

    /* 3. Look up the Segment Register */
    sr = env->sr[eaddr >> 28];

    /* 4. Handle direct store segments */
    if (sr & SR32_T) {
        if (ppc_hash32_direct_store(cpu, sr, eaddr, access_type,
                                    &raddr, &prot) == 0) {
            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        } else {
            return 1;
        }
    }

    /* 5. Check for segment level no-execute violation */
    if (access_type == MMU_INST_FETCH && (sr & SR32_NX)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 6. Locate the PTE in the hash table */
    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }

        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 7. Check access permissions */

    prot = ppc_hash32_pte_prot(cpu, sr, pte);

    if (need_prot & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (access_type == MMU_DATA_STORE) {
                env->spr[SPR_DSISR] = 0x0a000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 8. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE32_R_R)) {
        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
    }
    if (!(pte.pte1 & HPTE32_R_C)) {
        if (access_type == MMU_DATA_STORE) {
            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 9. Determine the real address from the PTE */

    raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
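
/*
 * Note on the error codes used in the fault paths above: ISI faults
 * report through SRR1-style bits placed in env->error_code
 * (0x40000000 = PTE not found, 0x08000000 = protection violation,
 * 0x10000000 = fetch from a no-execute or direct-store segment), while
 * DSI faults report through SPR_DSISR with the same "not found" and
 * "protection" bits plus 0x02000000 set when the faulting access is a
 * store.
 */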
hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;

    if (msr_dr == 0) {
        /* Translation is off */
        return eaddr;
    }

    if (env->nb_BATs != 0) {
        hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
        if (raddr != -1) {
            return raddr;
        }
    }

    sr = env->sr[eaddr >> 28];

    if (sr & SR32_T) {
        /* FIXME: Add suitable debug support for Direct Store segments */
        return -1;
    }

    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK;
}