/*
 * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash32.h"
#include "exec/log.h"

/* #define DEBUG_BATS */

#ifdef DEBUG_BATS
# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif

struct mmu_ctx_hash32 {
    hwaddr raddr;      /* Real address    */
    int prot;          /* Protection bits */
    int key;           /* Access key      */
};

static int ppc_hash32_pp_prot(int key, int pp, int nx)
{
    int prot;

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
            prot = PAGE_READ;
            break;

        default:
            abort();
        }
    } else {
        switch (pp) {
        case 0x0:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        default:
            abort();
        }
    }
    if (nx == 0) {
        prot |= PAGE_EXEC;
    }

    return prot;
}
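/*
 * For reference, the access rights produced by ppc_hash32_pp_prot()
 * above can be summarised as the usual PowerPC key/PP table
 * (PAGE_EXEC is then added whenever the segment is not no-execute):
 *
 *   key  pp   access
 *    0   00   read/write
 *    0   01   read/write
 *    0   10   read/write
 *    0   11   read only
 *    1   00   no access
 *    1   01   read only
 *    1   10   read/write
 *    1   11   read only
 */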
static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
                               target_ulong sr, ppc_hash_pte32_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;

    key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
    pp = pte.pte1 & HPTE32_R_PP;

    return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}

static target_ulong hash32_bat_size(PowerPCCPU *cpu,
                                    target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;

    if ((msr_pr && !(batu & BATU32_VP))
        || (!msr_pr && !(batu & BATU32_VS))) {
        return 0;
    }

    return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}

static int hash32_bat_prot(PowerPCCPU *cpu,
                           target_ulong batu, target_ulong batl)
{
    int pp, prot;

    prot = 0;
    pp = batl & BATL32_PP;
    if (pp != 0) {
        prot = PAGE_READ | PAGE_EXEC;
        if (pp == 0x2) {
            prot |= PAGE_WRITE;
        }
    }
    return prot;
}

static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
                                        target_ulong batu, target_ulong batl)
{
    if (!(batl & BATL32_601_V)) {
        return 0;
    }

    return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}

static int hash32_bat_601_prot(PowerPCCPU *cpu,
                               target_ulong batu, target_ulong batl)
{
    CPUPPCState *env = &cpu->env;
    int key, pp;

    pp = batu & BATU32_601_PP;
    if (msr_pr == 0) {
        key = !!(batu & BATU32_601_KS);
    } else {
        key = !!(batu & BATU32_601_KP);
    }
    return ppc_hash32_pp_prot(key, pp, 0);
}

static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
                                    int *prot)
{
    CPUPPCState *env = &cpu->env;
    target_ulong *BATlt, *BATut;
    int i;

    LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
             rwx == 2 ? 'I' : 'D', ea);
    if (rwx == 2) {
        BATlt = env->IBAT[1];
        BATut = env->IBAT[0];
    } else {
        BATlt = env->DBAT[1];
        BATut = env->DBAT[0];
    }
    for (i = 0; i < env->nb_BATs; i++) {
        target_ulong batu = BATut[i];
        target_ulong batl = BATlt[i];
        target_ulong mask;

        if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
            mask = hash32_bat_601_size(cpu, batu, batl);
        } else {
            mask = hash32_bat_size(cpu, batu, batl);
        }
        LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                 " BATl " TARGET_FMT_lx "\n", __func__,
                 rwx == 2 ? 'I' : 'D', i, ea, batu, batl);

        if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
            hwaddr raddr = (batl & mask) | (ea & ~mask);

            if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
                *prot = hash32_bat_601_prot(cpu, batu, batl);
            } else {
                *prot = hash32_bat_prot(cpu, batu, batl);
            }

            return raddr & TARGET_PAGE_MASK;
        }
    }

    /* No hit */
#if defined(DEBUG_BATS)
    if (qemu_log_enabled()) {
        target_ulong *BATu, *BATl;
        target_ulong BEPIu, BEPIl, bl;

        LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
        for (i = 0; i < 4; i++) {
            BATu = &BATut[i];
            BATl = &BATlt[i];
            BEPIu = *BATu & 0xF0000000;
            BEPIl = *BATu & 0x0FFE0000;
            bl = (*BATu & 0x00001FFC) << 15;
            LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
                     " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
                     TARGET_FMT_lx " " TARGET_FMT_lx "\n",
                     __func__, rwx == 2 ? 'I' : 'D', i, ea,
                     *BATu, *BATl, BEPIu, BEPIl, bl);
        }
    }
#endif

    return -1;
}
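/*
 * Worked example of the BAT match above (values chosen for this
 * comment only): with BL = 0x7ff the block is 256 MiB, so
 * hash32_bat_size() yields mask = 0xf0000000.  An effective address
 * of 0x12345678 then matches a BAT whose BEPI is 0x10000000, and the
 * real address is (batl & mask) | (ea & ~mask), i.e. the block's
 * physical base plus the 0x02345678 offset within the block.
 */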
static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
                                   target_ulong eaddr, int rwx,
                                   hwaddr *raddr, int *prot)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));

    qemu_log_mask(CPU_LOG_MMU, "direct store...\n");

    if ((sr & 0x1FF00000) >> 20 == 0x07f) {
        /*
         * Memory-forced I/O controller interface access
         *
         * If T=1 and BUID=x'07F', the 601 performs a memory access
         * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
         */
        *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rwx == 2) {
        /* No code fetch is allowed in direct-store areas */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    switch (env->access_type) {
    case ACCESS_INT:
        /* Integer load/store : only access allowed */
        break;
    case ACCESS_FLOAT:
        /* Floating point load/store */
        cs->exception_index = POWERPC_EXCP_ALIGN;
        env->error_code = POWERPC_EXCP_ALIGN_FP;
        env->spr[SPR_DAR] = eaddr;
        return 1;
    case ACCESS_RES:
        /* lwarx, ldarx, stwcx. or stdcx. */
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (rwx == 1) {
            env->spr[SPR_DSISR] = 0x06000000;
        } else {
            env->spr[SPR_DSISR] = 0x04000000;
        }
        return 1;
    case ACCESS_CACHE:
        /*
         * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi
         *
         * Should make the instruction a no-op.  As it already does
         * nothing, it's quite easy :-)
         */
        *raddr = eaddr;
        return 0;
    case ACCESS_EXT:
        /* eciwx or ecowx */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (rwx == 1) {
            env->spr[SPR_DSISR] = 0x06100000;
        } else {
            env->spr[SPR_DSISR] = 0x04100000;
        }
        return 1;
    default:
        cpu_abort(cs, "ERROR: instruction should not need "
                  "address translation\n");
    }
    if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) {
        *raddr = eaddr;
        return 0;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->error_code = 0;
        env->spr[SPR_DAR] = eaddr;
        if (rwx == 1) {
            env->spr[SPR_DSISR] = 0x0a000000;
        } else {
            env->spr[SPR_DSISR] = 0x08000000;
        }
        return 1;
    }
}

hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
    target_ulong mask = ppc_hash32_hpt_mask(cpu);

    return (hash * HASH_PTEG_SIZE_32) & mask;
}

static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pte_offset = pteg_off;
    target_ulong pte0, pte1;
    int i;

    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);

        if ((pte0 & HPTE32_V_VALID)
            && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
            && HPTE32_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            return pte_offset;
        }

        pte_offset += HASH_PTE_SIZE_32;
    }

    return -1;
}

static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 6;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
}

static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1)
{
    target_ulong base = ppc_hash32_hpt_base(cpu);
    hwaddr offset = pte_offset + 7;

    /* The HW performs a non-atomic byte update */
    stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
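/*
 * Hash table lookup, as implemented below: the primary hash is
 * VSID XOR page index, the secondary hash is its one's complement.
 * Illustrative example (values made up for this comment): with
 * vsid = 0x00123 and eaddr = 0x04567000, pgidx = 0x04567, so
 * hash = 0x00123 ^ 0x04567 = 0x04444, and the secondary lookup uses
 * ~0x04444.  Each hash selects a PTEG of HPTES_PER_GROUP entries,
 * which ppc_hash32_pteg_search() then scans linearly.
 */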
static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
                                     target_ulong sr, target_ulong eaddr,
                                     ppc_hash_pte32_t *pte)
{
    hwaddr pteg_off, pte_offset;
    hwaddr hash;
    uint32_t vsid, pgidx, ptem;

    vsid = sr & SR32_VSID;
    pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    hash = vsid ^ pgidx;
    ptem = (vsid << 7) | (pgidx >> 10);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
                  " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=%" PRIx32 " ptem=%" PRIx32
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu),
                  vsid, ptem, hash);
    pteg_off = get_pteg_offset32(cpu, hash);
    pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=%" PRIx32 " ptem=%" PRIx32
                      " hash=" TARGET_FMT_plx "\n", ppc_hash32_hpt_base(cpu),
                      ppc_hash32_hpt_mask(cpu), vsid, ptem, ~hash);
        pteg_off = get_pteg_offset32(cpu, ~hash);
        pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
                                   target_ulong eaddr)
{
    hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
    hwaddr mask = ~TARGET_PAGE_MASK;

    return (rpn & ~mask) | (eaddr & mask);
}
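/*
 * Fault handling entry point for the hash32 MMU.  The numbered steps
 * in the body below mirror the architected translation sequence:
 * real-mode accesses pass through untranslated, the BATs are checked
 * first, then the segment register selects either direct-store
 * handling or a hash table lookup, after which access permissions are
 * checked and the referenced/changed bits are updated.
 */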
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        raddr = eaddr;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Check Block Address Translation entries (BATs) */
    if (env->nb_BATs != 0) {
        raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
        if (raddr != -1) {
            if (need_prot[rwx] & ~prot) {
                if (rwx == 2) {
                    cs->exception_index = POWERPC_EXCP_ISI;
                    env->error_code = 0x08000000;
                } else {
                    cs->exception_index = POWERPC_EXCP_DSI;
                    env->error_code = 0;
                    env->spr[SPR_DAR] = eaddr;
                    if (rwx == 1) {
                        env->spr[SPR_DSISR] = 0x0a000000;
                    } else {
                        env->spr[SPR_DSISR] = 0x08000000;
                    }
                }
                return 1;
            }

            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        }
    }

    /* 3. Look up the Segment Register */
    sr = env->sr[eaddr >> 28];

    /* 4. Handle direct store segments */
    if (sr & SR32_T) {
        if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
                                    &raddr, &prot) == 0) {
            tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
                         raddr & TARGET_PAGE_MASK, prot, mmu_idx,
                         TARGET_PAGE_SIZE);
            return 0;
        } else {
            return 1;
        }
    }

    /* 5. Check for segment level no-execute violation */
    if ((rwx == 2) && (sr & SR32_NX)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 6. Locate the PTE in the hash table */
    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }

        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 7. Check access permissions */

    prot = ppc_hash32_pte_prot(cpu, sr, pte);

    if (need_prot[rwx] & ~prot) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x0a000000;
            } else {
                env->spr[SPR_DSISR] = 0x08000000;
            }
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 8. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE32_R_R)) {
        ppc_hash32_set_r(cpu, pte_offset, pte.pte1);
    }
    if (!(pte.pte1 & HPTE32_R_C)) {
        if (rwx == 1) {
            ppc_hash32_set_c(cpu, pte_offset, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 9. Determine the real address from the PTE */

    raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    target_ulong sr;
    hwaddr pte_offset;
    ppc_hash_pte32_t pte;
    int prot;

    if (msr_dr == 0) {
        /* Translation is off */
        return eaddr;
    }

    if (env->nb_BATs != 0) {
        hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
        if (raddr != -1) {
            return raddr;
        }
    }

    sr = env->sr[eaddr >> 28];

    if (sr & SR32_T) {
        /* FIXME: Add suitable debug support for Direct Store segments */
        return -1;
    }

    pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK;
}