/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif

static struct XtensaConfigList *xtensa_cores;

static void xtensa_core_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);
    XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
    const XtensaConfig *config = data;

    xcc->config = config;

    /* Use num_core_regs to see only non-privileged registers in an unmodified
     * gdb. Use num_regs to see all registers. A gdb modification is required
     * for that: reset bit 0 in the 'flags' field of the register definitions
     * in gdb/xtensa-config.c inside the gdb source tree or the gdb overlay.
     */
    cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
}

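/*
 * Build the per-core opcode translation table from libisa: for each
 * opcode, look its ops up in the core's own translator tables when the
 * core provides them, otherwise in the common xtensa_core_opcodes table.
 * Opcodes without a translator are left NULL (reported in DEBUG builds).
 */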
static void init_libisa(XtensaConfig *config)
{
    unsigned i, j;
    unsigned opcodes;

    config->isa = xtensa_isa_init(config->isa_internal, NULL, NULL);
    assert(xtensa_isa_maxlength(config->isa) <= MAX_INSN_LENGTH);
    opcodes = xtensa_isa_num_opcodes(config->isa);
    config->opcode_ops = g_new(XtensaOpcodeOps *, opcodes);

    for (i = 0; i < opcodes; ++i) {
        const char *opc_name = xtensa_opcode_name(config->isa, i);
        XtensaOpcodeOps *ops = NULL;

        assert(xtensa_opcode_num_operands(config->isa, i) <= MAX_OPCODE_ARGS);
        if (!config->opcode_translators) {
            ops = xtensa_find_opcode_ops(&xtensa_core_opcodes, opc_name);
        } else {
            for (j = 0; !ops && config->opcode_translators[j]; ++j) {
                ops = xtensa_find_opcode_ops(config->opcode_translators[j],
                                             opc_name);
            }
        }
#ifdef DEBUG
        if (ops == NULL) {
            fprintf(stderr,
                    "opcode translator not found for %s's opcode '%s'\n",
                    config->name, opc_name);
        }
#endif
        config->opcode_ops[i] = ops;
    }
}

void xtensa_finalize_config(XtensaConfig *config)
{
    if (config->isa_internal) {
        init_libisa(config);
    }

    if (config->gdb_regmap.num_regs == 0 ||
        config->gdb_regmap.num_core_regs == 0) {
        unsigned n_regs = 0;
        unsigned n_core_regs = 0;

        xtensa_count_regs(config, &n_regs, &n_core_regs);
        if (config->gdb_regmap.num_regs == 0) {
            config->gdb_regmap.num_regs = n_regs;
        }
        if (config->gdb_regmap.num_core_regs == 0) {
            config->gdb_regmap.num_core_regs = n_core_regs;
        }
    }
}

void xtensa_register_core(XtensaConfigList *node)
{
    TypeInfo type = {
        .parent = TYPE_XTENSA_CPU,
        .class_init = xtensa_core_class_init,
        .class_data = (void *)node->config,
    };

    node->next = xtensa_cores;
    xtensa_cores = node;
    type.name = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), node->config->name);
    type_register(&type);
    g_free((gpointer)type.name);
}

static uint32_t check_hw_breakpoints(CPUXtensaState *env)
{
    unsigned i;

    for (i = 0; i < env->config->ndbreak; ++i) {
        if (env->cpu_watchpoint[i] &&
            env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
            return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
        }
    }
    return 0;
}

void xtensa_breakpoint_handler(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            uint32_t cause;

            cs->watchpoint_hit = NULL;
            cause = check_hw_breakpoints(env);
            if (cause) {
                debug_exception_env(env, cause);
            }
            cpu_loop_exit_noexc(cs);
        }
    }
}

void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    XtensaConfigList *core = xtensa_cores;
    cpu_fprintf(f, "Available CPUs:\n");
    for (; core; core = core->next) {
        cpu_fprintf(f, "  %s\n", core->config->name);
    }
}

/*
 * Debug/gdb physical page lookup: try the address first as a data access,
 * then as an instruction fetch; return all-ones (~0) if neither maps.
 */
hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
#ifndef CONFIG_USER_ONLY
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
#else
    return addr;
#endif
}

#ifndef CONFIG_USER_ONLY

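/*
 * With the relocatable vector option, vectors are offsets from VECBASE
 * rather than the fixed addresses of the core configuration; translate
 * a configured vector address into its current location.
 */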
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    if (level > xtensa_get_cintlevel(env) &&
        level <= env->config->nlevel &&
        (env->config->level_mask[level] &
         env->sregs[INTSET] &
         env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                    env->config->interrupt_vector[level]);
        } else {
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                      "pc = %08x, a0 = %08x, ps = %08x, "
                      "intset = %08x, intenable = %08x, "
                      "ccount = %08x\n",
                      __func__, env->pending_irq_level,
                      xtensa_get_cintlevel(env),
                      env->pc, env->regs[0], env->sregs[PS],
                      env->sregs[INTSET], env->sregs[INTENABLE],
                      env->sregs[CCOUNT]);
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                      "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                      __func__, cs->exception_index,
                      env->pc, env->regs[0], env->sregs[PS],
                      env->sregs[CCOUNT]);
        if (env->config->exception_vector[cs->exception_index]) {
            env->pc = relocated_vector(env,
                    env->config->exception_vector[cs->exception_index]);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "%s(pc = %08x) bad exception_index: %d\n",
                          __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                 __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}
#else
void xtensa_cpu_do_interrupt(CPUState *cs)
{
}
#endif

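/*
 * Hard IRQs arrive here from the cpu_exec() interrupt path; they are
 * routed through the EXC_IRQ pseudo exception so that
 * xtensa_cpu_do_interrupt()/handle_interrupt() can decide whether the
 * pending interrupt level is currently allowed to be taken.
 */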
bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = EXC_IRQ;
        xtensa_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#ifdef CONFIG_USER_ONLY

int xtensa_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
                                int mmu_idx)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT,
                  "%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n",
                  __func__, rw, address, size);
    env->sregs[EXCVADDR] = address;
    env->sregs[EXCCAUSE] = rw ? STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE;
    cs->exception_index = EXC_USER;
    return 1;
}

#else

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
        const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
        xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}

/*
 * Reset the MMU: with the full MMU option, set RASID/ITLBCFG/DTLBCFG to
 * their reset values, invalidate all TLB entries and initialize ways 5
 * and 6; otherwise identity-map the eight 512MB regions of way 0.
 */
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else {
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

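/*
 * Map an ASID back to its protection ring: RASID holds one 8-bit ASID
 * per ring (ring 0 in the least significant byte). Returns 0xff when the
 * ASID is not assigned to any ring.
 */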
static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Look up the xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
        uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

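/*
 * Both region protection above and the CACHEATTR fallback below carve the
 * 4GB address space into eight 512MB regions selected by vaddr[31:29];
 * only the per-region attribute encoding differs.
 */
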
/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return 0;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);

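/*
 * MMU translation proper: look vaddr up in the TLBs; on a miss, when
 * may_lookup_pt allows, fetch the PTE with get_pte() and either install
 * it into one of the four autorefill ways (round-robin via
 * autorefill_idx) when update_tlb is set, or translate through a
 * temporary entry. Ring privilege and access rights are then checked
 * against mmu_idx and is_write.
 */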
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access,
        bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}

/*
 * Fetch the PTE for the autorefill of vaddr: build the PTE address from
 * PTEVADDR, translate it without modifying the TLB and load the PTE from
 * guest memory, logging the outcome.
 */
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

static int get_physical_addr_region(CPUXtensaState *env,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}

/*!
 * Convert virtual address to physical address.
 * MMU may issue pagewalk and change xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
        uint32_t vaddr, int is_write, int mmu_idx,
        uint32_t *paddr, uint32_t *page_size, unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                vaddr, is_write, mmu_idx, paddr, page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(
                env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

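/*
 * Print all valid (non-zero ASID) entries of one TLB, grouped by way,
 * with decoded access rights and cache attributes.
 */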
static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
        CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
                    cpu_fprintf(f,
                            "\tVaddr      Paddr      ASID Attr RWX Cache\n"
                            "\t---------- ---------- ---- ---- --- -------\n");
                }
                cpu_fprintf(f,
                        "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
                        entry->vaddr,
                        entry->paddr,
                        entry->asid,
                        entry->attr,
                        (access & PAGE_READ) ? 'R' : '-',
                        (access & PAGE_WRITE) ? 'W' : '-',
                        (access & PAGE_EXEC) ? 'X' : '-',
                        cache_text[cache_idx] ? cache_text[cache_idx] :
                        "Invalid");
            }
        }
    }
}

void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        cpu_fprintf(f, "ITLB:\n");
        dump_tlb(f, cpu_fprintf, env, false);
        cpu_fprintf(f, "\nDTLB:\n");
        dump_tlb(f, cpu_fprintf, env, true);
    } else {
        cpu_fprintf(f, "No TLB for this CPU core\n");
    }
}

/*
 * Model the RunStall processor input: while asserted the core is halted;
 * deasserting it lets the core resume execution.
 */
void xtensa_runstall(CPUXtensaState *env, bool runstall)
{
    CPUState *cpu = CPU(xtensa_env_get_cpu(env));

    env->runstall = runstall;
    cpu->halted = runstall;
    if (runstall) {
        cpu_interrupt(cpu, CPU_INTERRUPT_HALT);
    } else {
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
    }
}
#endif