/*
 * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/qemu-print.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "system/memory.h"

#define XTENSA_MPU_SEGMENT_MASK 0x0000001f
#define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00
#define XTENSA_MPU_ACC_RIGHTS_SHIFT 8
#define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000
#define XTENSA_MPU_MEM_TYPE_SHIFT 12
#define XTENSA_MPU_ATTR_MASK 0x001fff00

#define XTENSA_MPU_PROBE_B 0x40000000
#define XTENSA_MPU_PROBE_V 0x80000000

#define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001
#define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002
#define XTENSA_MPU_SYSTEM_TYPE_C 0x0003
#define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003

#define XTENSA_MPU_TYPE_SYS_C 0x0010
#define XTENSA_MPU_TYPE_SYS_W 0x0020
#define XTENSA_MPU_TYPE_SYS_R 0x0040
#define XTENSA_MPU_TYPE_CPU_C 0x0100
#define XTENSA_MPU_TYPE_CPU_W 0x0200
#define XTENSA_MPU_TYPE_CPU_R 0x0400
#define XTENSA_MPU_TYPE_CPU_CACHE 0x0800
#define XTENSA_MPU_TYPE_B 0x1000
#define XTENSA_MPU_TYPE_INT 0x2000

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Probe the memory; we don't care about the result but
     * only the side-effects (i.e. any MMU or other exception)
     */
    probe_access(env, vaddr, 1, MMU_INST_FETCH,
                 cpu_mmu_index(env_cpu(env), true), GETPC());
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(env_cpu(env));
    }
}
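
/*
 * ITLBCFG/DTLBCFG hold the page size selectors for the variable
 * page size TLB ways: bits 16..17 select the size for way 4, bit 20
 * for way 5 and bit 24 for way 6; fixed-size ways have no selector
 * and get 0 here.
 */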
static uint32_t get_page_size(const CPUXtensaState *env,
                              bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env,
                                         bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v,
                                     bool dtlb, uint32_t *vpn,
                                     uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static bool split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        if (*wi < (dtlb ? env->config->dtlb.nways : env->config->itlb.nways)) {
            split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
            return true;
        } else {
            return false;
        }
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
        return true;
    }
}

static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb,
                                              unsigned wi, unsigned ei)
{
    const xtensa_tlb *tlb = dtlb ? &env->config->dtlb : &env->config->itlb;

    assert(wi < tlb->nways && ei < tlb->way_size[wi]);
    return dtlb ?
        env->dtlb[wi] + ei :
        env->itlb[wi] + ei;
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        if (pwi) {
            *pwi = wi;
        }
        return xtensa_tlb_get_entry(env, dtlb, wi, ei);
    } else {
        return NULL;
    }
}
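
/*
 * Fill an MMU TLB entry from a PTE. As used below, PTE bits 12..31
 * hold the PPN, bits 4..5 the ring and bits 0..3 the attributes;
 * (pte >> 1) & 0x18 turns the ring into a byte offset into RASID,
 * which packs the ASIDs of rings 0..3 into its four bytes.
 */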
static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                                     xtensa_tlb_entry *entry, bool dtlb,
                                     unsigned wi, unsigned ei, uint32_t vpn,
                                     uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                                 unsigned wi, unsigned ei,
                                 uint32_t vpn, uint32_t pte)
{
    CPUState *cs = env_cpu(env);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;

    if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
                                 &paddr, &page_size, &access) == 0) {
        return paddr;
    }
    return ~0;
}

static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
                                   const xtensa_tlb *tlb,
                                   xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned wi, ei;

    for (wi = 0; wi < tlb->nways; ++wi) {
        for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
            entry[wi][ei].asid = 0;
            entry[wi][ei].variable = true;
        }
    }
}

static void reset_tlb_mmu_ways56(CPUXtensaState *env,
                                 const xtensa_tlb *tlb,
                                 xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    if (!tlb->varway56) {
        static const xtensa_tlb_entry way5[] = {
            {
                .vaddr = 0xd0000000,
                .paddr = 0,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xd8000000,
                .paddr = 0,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        static const xtensa_tlb_entry way6[] = {
            {
                .vaddr = 0xe0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 7,
                .variable = false,
            }, {
                .vaddr = 0xf0000000,
                .paddr = 0xf0000000,
                .asid = 1,
                .attr = 3,
                .variable = false,
            }
        };
        memcpy(entry[5], way5, sizeof(way5));
        memcpy(entry[6], way6, sizeof(way6));
    } else {
        uint32_t ei;
        for (ei = 0; ei < 8; ++ei) {
            entry[6][ei].vaddr = ei << 29;
            entry[6][ei].paddr = ei << 29;
            entry[6][ei].asid = 1;
            entry[6][ei].attr = 3;
        }
    }
}

static void reset_tlb_region_way0(CPUXtensaState *env,
                                  xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
{
    unsigned ei;

    for (ei = 0; ei < 8; ++ei) {
        entry[0][ei].vaddr = ei << 29;
        entry[0][ei].paddr = ei << 29;
        entry[0][ei].asid = 1;
        entry[0][ei].attr = 2;
        entry[0][ei].variable = true;
    }
}
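
/*
 * Reset notes: RASID = 0x04030201 assigns ASID N + 1 to ring N, so
 * ring 0 (ASID 1) always matches the fixed ways 5/6 entries installed
 * by reset_tlb_mmu_ways56(). Without MMU/MPU, CACHEATTR = 0x22222222
 * marks every 512MB region RWX with caches bypassed (attribute 2 in
 * cacheattr_attr_to_access()).
 */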
void reset_mmu(CPUXtensaState *env)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        env->sregs[RASID] = 0x04030201;
        env->sregs[ITLBCFG] = 0;
        env->sregs[DTLBCFG] = 0;
        env->autorefill_idx = 0;
        reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
        reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
        reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        unsigned i;

        env->sregs[MPUENB] = 0;
        env->sregs[MPUCFG] = env->config->n_mpu_fg_segments;
        env->sregs[CACHEADRDIS] = 0;
        assert(env->config->n_mpu_bg_segments > 0 &&
               env->config->mpu_bg[0].vaddr == 0);
        for (i = 1; i < env->config->n_mpu_bg_segments; ++i) {
            assert(env->config->mpu_bg[i].vaddr >=
                   env->config->mpu_bg[i - 1].vaddr);
        }
    } else {
        env->sregs[CACHEATTR] = 0x22222222;
        reset_tlb_region_way0(env, env->itlb);
        reset_tlb_region_way0(env, env->dtlb);
    }
}

static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
{
    unsigned i;
    for (i = 0; i < 4; ++i) {
        if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
            return i;
        }
    }
    return 0xff;
}

/*!
 * Lookup xtensa TLB for the given virtual address.
 * See ISA, 4.6.2.2
 *
 * \param pwi: [out] way index
 * \param pei: [out] entry index
 * \param pring: [out] access ring
 * \return 0 if ok, exception cause code otherwise
 */
static int xtensa_tlb_lookup(const CPUXtensaState *env,
                             uint32_t addr, bool dtlb,
                             uint32_t *pwi, uint32_t *pei, uint8_t *pring)
{
    const xtensa_tlb *tlb = dtlb ?
        &env->config->dtlb : &env->config->itlb;
    const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
        env->dtlb : env->itlb;

    int nhits = 0;
    unsigned wi;

    for (wi = 0; wi < tlb->nways; ++wi) {
        uint32_t vpn;
        uint32_t ei;
        split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
        if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
            unsigned ring = get_ring(env, entry[wi][ei].asid);
            if (ring < 4) {
                if (++nhits > 1) {
                    return dtlb ?
                        LOAD_STORE_TLB_MULTI_HIT_CAUSE :
                        INST_TLB_MULTI_HIT_CAUSE;
                }
                *pwi = wi;
                *pei = ei;
                *pring = ring;
            }
        }
    }
    return nhits ? 0 :
        (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
}
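
/*
 * The helpers below back the TLB management opcodes ({R,I,P,W}{I,D}TLB):
 * rtlb0/rtlb1 read the two halves of an entry (VPN + ASID and
 * PADDR + attributes), itlb invalidates an entry, ptlb probes by
 * virtual address and wtlb writes an entry.
 */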
uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);

        if (entry) {
            return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
        } else {
            return 0;
        }
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);

    if (entry) {
        return entry->paddr | entry->attr;
    } else {
        return 0;
    }
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry && entry->variable && entry->asid) {
            tlb_flush_page(env_cpu(env), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    if (split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei)) {
        xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
    }
}

/*!
 * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.5.10
 */
static unsigned mmu_attr_to_access(uint32_t attr)
{
    unsigned access = 0;

    if (attr < 12) {
        access |= PAGE_READ;
        if (attr & 0x1) {
            access |= PAGE_EXEC;
        }
        if (attr & 0x2) {
            access |= PAGE_WRITE;
        }

        switch (attr & 0xc) {
        case 0:
            access |= PAGE_CACHE_BYPASS;
            break;

        case 4:
            access |= PAGE_CACHE_WB;
            break;

        case 8:
            access |= PAGE_CACHE_WT;
            break;
        }
    } else if (attr == 13) {
        access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
    }
    return access;
}

/*!
 * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, 4.6.3.3
 */
static unsigned region_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}
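
/*
 * With neither MMU, MPU nor region protection, CACHEATTR supplies one
 * 4-bit attribute per 512MB region: the nibble for a given vaddr sits
 * at bit (vaddr >> 29) * 4 (see xtensa_get_physical_addr() below).
 */
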
/*!
 * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
 * See ISA, A.2.14 The Cache Attribute Register
 */
static unsigned cacheattr_attr_to_access(uint32_t attr)
{
    static const unsigned access[16] = {
        [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
        [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
        [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
        [3] = PAGE_EXEC | PAGE_CACHE_WB,
        [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
        [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
    };

    return access[attr & 0xf];
}

struct attr_pattern {
    uint32_t mask;
    uint32_t value;
};

static int attr_pattern_match(uint32_t attr,
                              const struct attr_pattern *pattern,
                              size_t n)
{
    size_t i;

    for (i = 0; i < n; ++i) {
        if ((attr & pattern[i].mask) == pattern[i].value) {
            return 1;
        }
    }
    return 0;
}

static unsigned mpu_attr_to_cpu_cache(uint32_t attr)
{
    static const struct attr_pattern cpu_c[] = {
        { .mask = 0x18f, .value = 0x089 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x180, .value = 0x180 },
    };

    unsigned type = 0;

    if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) {
        type |= XTENSA_MPU_TYPE_CPU_CACHE;
        if (attr & 0x10) {
            type |= XTENSA_MPU_TYPE_CPU_C;
        }
        if (attr & 0x20) {
            type |= XTENSA_MPU_TYPE_CPU_W;
        }
        if (attr & 0x40) {
            type |= XTENSA_MPU_TYPE_CPU_R;
        }
    }
    return type;
}

static unsigned mpu_attr_to_type(uint32_t attr)
{
    static const struct attr_pattern device_type[] = {
        { .mask = 0x1f6, .value = 0x000 },
        { .mask = 0x1f6, .value = 0x006 },
    };
    static const struct attr_pattern sys_nc_type[] = {
        { .mask = 0x1fe, .value = 0x018 },
        { .mask = 0x1fe, .value = 0x01e },
        { .mask = 0x18f, .value = 0x089 },
    };
    static const struct attr_pattern sys_c_type[] = {
        { .mask = 0x1f8, .value = 0x010 },
        { .mask = 0x188, .value = 0x080 },
        { .mask = 0x1f0, .value = 0x030 },
        { .mask = 0x180, .value = 0x180 },
    };
    static const struct attr_pattern b[] = {
        { .mask = 0x1f7, .value = 0x001 },
        { .mask = 0x1f7, .value = 0x007 },
        { .mask = 0x1ff, .value = 0x019 },
        { .mask = 0x1ff, .value = 0x01f },
    };

    unsigned type = 0;

    attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT;
    if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE;
        if (attr & 0x80) {
            type |= XTENSA_MPU_TYPE_INT;
        }
    }
    if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_NC;
    }
    if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) {
        type |= XTENSA_MPU_SYSTEM_TYPE_C;
        if (attr & 0x1) {
            type |= XTENSA_MPU_TYPE_SYS_C;
        }
        if (attr & 0x2) {
            type |= XTENSA_MPU_TYPE_SYS_W;
        }
        if (attr & 0x4) {
            type |= XTENSA_MPU_TYPE_SYS_R;
        }
    }
    if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) {
        type |= XTENSA_MPU_TYPE_B;
    }
    type |= mpu_attr_to_cpu_cache(attr);

    return type;
}
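
/*
 * MPU attribute word layout (matching the masks at the top of this
 * file): bits 8..11 hold the access rights, bits 12..20 the memory
 * type. The rights encoding differs between ring 0 (first table) and
 * rings 1..3 (second table); codes missing from a table grant no
 * access.
 */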
static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring)
{
    static const unsigned access[2][16] = {
        [0] = {
            [4] = PAGE_READ,
            [5] = PAGE_READ | PAGE_EXEC,
            [6] = PAGE_READ | PAGE_WRITE,
            [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE,
            [10] = PAGE_READ | PAGE_WRITE,
            [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
        [1] = {
            [8] = PAGE_WRITE,
            [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
            [10] = PAGE_READ,
            [11] = PAGE_READ | PAGE_EXEC,
            [12] = PAGE_READ,
            [13] = PAGE_READ | PAGE_EXEC,
            [14] = PAGE_READ | PAGE_WRITE,
            [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        },
    };
    unsigned rv;
    unsigned type;

    type = mpu_attr_to_cpu_cache(attr);
    rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >>
                           XTENSA_MPU_ACC_RIGHTS_SHIFT];

    if (type & XTENSA_MPU_TYPE_CPU_CACHE) {
        rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT;
    } else {
        rv |= PAGE_CACHE_BYPASS;
    }
    return rv;
}

static bool is_access_granted(unsigned access, int is_write)
{
    switch (is_write) {
    case 0:
        return access & PAGE_READ;

    case 1:
        return access & PAGE_WRITE;

    case 2:
        return access & PAGE_EXEC;

    default:
        return false;
    }
}

static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
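
/*
 * MMU translation. A TLB miss may be served by a hardware pagewalk
 * (get_pte()): on success the ring comes from PTE bits 4..5 and, when
 * update_tlb is set, the entry is installed into one of the four
 * autorefill ways (0..3), rotated through via autorefill_idx.
 */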
static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access, bool may_lookup_pt)
{
    bool dtlb = is_write != 2;
    uint32_t wi;
    uint32_t ei;
    uint8_t ring;
    uint32_t vpn;
    uint32_t pte;
    const xtensa_tlb_entry *entry = NULL;
    xtensa_tlb_entry tmp_entry;
    int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);

    if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
        may_lookup_pt && get_pte(env, vaddr, &pte)) {
        ring = (pte >> 4) & 0x3;
        wi = 0;
        split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);

        if (update_tlb) {
            wi = ++env->autorefill_idx & 0x3;
            xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
            env->sregs[EXCVADDR] = vaddr;
            qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
                          __func__, vaddr, vpn, pte);
        } else {
            xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
            entry = &tmp_entry;
        }
        ret = 0;
    }
    if (ret != 0) {
        return ret;
    }

    if (entry == NULL) {
        entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
    }

    if (ring < mmu_idx) {
        return dtlb ?
            LOAD_STORE_PRIVILEGE_CAUSE :
            INST_FETCH_PRIVILEGE_CAUSE;
    }

    *access = mmu_attr_to_access(entry->attr) &
        ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
    *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;

    return 0;
}
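
/*
 * Load a PTE for the hardware pagewalk. The page table is a linear
 * array of 4-byte PTEs indexed by VPN: PTEVADDR holds its 4MB-aligned
 * virtual base, so the PTE address is PTEVADDR | (vaddr >> 10) with
 * the low two bits cleared. The walk itself goes through the TLB with
 * may_lookup_pt set to false so that it cannot recurse.
 */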
static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
{
    CPUState *cs = env_cpu(env);
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    uint32_t pt_vaddr =
        (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
    int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
                                    &paddr, &page_size, &access, false);

    if (ret == 0) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n",
                      __func__, vaddr, pt_vaddr, paddr);
    } else {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n",
                      __func__, vaddr, pt_vaddr, ret);
    }

    if (ret == 0) {
        MemTxResult result;

        *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED,
                                 &result);
        if (result != MEMTX_OK) {
            qemu_log_mask(CPU_LOG_MMU,
                          "%s: couldn't load PTE: transaction failed (%u)\n",
                          __func__, (unsigned)result);
            ret = 1;
        }
    }
    return ret == 0;
}

static int get_physical_addr_region(CPUXtensaState *env,
                                    uint32_t vaddr, int is_write, int mmu_idx,
                                    uint32_t *paddr, uint32_t *page_size,
                                    unsigned *access)
{
    bool dtlb = is_write != 2;
    uint32_t wi = 0;
    uint32_t ei = (vaddr >> 29) & 0x7;
    const xtensa_tlb_entry *entry =
        xtensa_tlb_get_entry(env, dtlb, wi, ei);

    *access = region_attr_to_access(entry->attr);
    if (!is_access_granted(*access, is_write)) {
        return dtlb ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }

    *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
    *page_size = ~REGION_PAGE_MASK + 1;

    return 0;
}
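
/*
 * MPU segments are defined by their start address only: segment i
 * covers [entry[i].vaddr, entry[i + 1].vaddr), the last one extending
 * to the end of the address space. Foreground entries need not be
 * sorted, so overlapping segments show up as multiple hits.
 */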
static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n,
                             uint32_t vaddr, unsigned *segment)
{
    unsigned nhits = 0;
    unsigned i;

    for (i = 0; i < n; ++i) {
        if (vaddr >= entry[i].vaddr &&
            (i == n - 1 || vaddr < entry[i + 1].vaddr)) {
            if (nhits++) {
                break;
            }
            *segment = i;
        }
    }
    return nhits;
}

void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v)
{
    v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1;

    if (v != env->sregs[MPUENB]) {
        env->sregs[MPUENB] = v;
        tlb_flush(env_cpu(env));
    }
}

void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v)
{
    unsigned segment = p & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        env->mpu_fg[segment].vaddr = v & -env->config->mpu_align;
        env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK;
        env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v);
        tlb_flush(env_cpu(env));
    }
}

uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].vaddr |
            extract32(env->sregs[MPUENB], segment, 1);
    } else {
        return 0;
    }
}

uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s)
{
    unsigned segment = s & XTENSA_MPU_SEGMENT_MASK;

    if (segment < env->config->n_mpu_fg_segments) {
        return env->mpu_fg[segment].attr;
    } else {
        return 0;
    }
}

uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v)
{
    unsigned nhits;
    unsigned segment;
    unsigned bg_segment;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              v, &segment);
    if (nhits > 1) {
        HELPER(exception_cause_vaddr)(env, env->pc,
                                      LOAD_STORE_TLB_MULTI_HIT_CAUSE, v);
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          v, &bg_segment);
        return env->config->mpu_bg[bg_segment].attr | XTENSA_MPU_PROBE_B;
    }
}
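
/*
 * MPU translation: there is no paging, so a matching segment yields
 * paddr == vaddr; page_size is reported as the MPU alignment granule,
 * the unit at which protection can change.
 */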
static int get_physical_addr_mpu(CPUXtensaState *env,
                                 uint32_t vaddr, int is_write, int mmu_idx,
                                 uint32_t *paddr, uint32_t *page_size,
                                 unsigned *access)
{
    unsigned nhits;
    unsigned segment;
    uint32_t attr;

    nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments,
                              vaddr, &segment);
    if (nhits > 1) {
        return is_write < 2 ?
            LOAD_STORE_TLB_MULTI_HIT_CAUSE :
            INST_TLB_MULTI_HIT_CAUSE;
    } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) {
        attr = env->mpu_fg[segment].attr;
    } else {
        xtensa_mpu_lookup(env->config->mpu_bg,
                          env->config->n_mpu_bg_segments,
                          vaddr, &segment);
        attr = env->config->mpu_bg[segment].attr;
    }

    *access = mpu_attr_to_access(attr, mmu_idx);
    if (!is_access_granted(*access, is_write)) {
        return is_write < 2 ?
            (is_write ?
             STORE_PROHIBITED_CAUSE :
             LOAD_PROHIBITED_CAUSE) :
            INST_FETCH_PROHIBITED_CAUSE;
    }
    *paddr = vaddr;
    *page_size = env->config->mpu_align;
    return 0;
}

/*!
 * Convert a virtual address to a physical address.
 * The MMU may issue a pagewalk and change the xtensa autorefill TLB way entry.
 *
 * \return 0 if ok, exception cause code otherwise
 */
int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
                             uint32_t vaddr, int is_write, int mmu_idx,
                             uint32_t *paddr, uint32_t *page_size,
                             unsigned *access)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        return get_physical_addr_mmu(env, update_tlb,
                                     vaddr, is_write, mmu_idx, paddr,
                                     page_size, access, true);
    } else if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
        return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
                                        paddr, page_size, access);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx,
                                     paddr, page_size, access);
    } else {
        *paddr = vaddr;
        *page_size = TARGET_PAGE_SIZE;
        *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >>
                                           ((vaddr & 0xe0000000) >> 27));
        return 0;
    }
}

static void dump_tlb(CPUXtensaState *env, bool dtlb)
{
    unsigned wi, ei;
    const xtensa_tlb *conf =
        dtlb ? &env->config->dtlb : &env->config->itlb;
    unsigned (*attr_to_access)(uint32_t) =
        xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
        mmu_attr_to_access : region_attr_to_access;

    for (wi = 0; wi < conf->nways; ++wi) {
        uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
        const char *sz_text;
        bool print_header = true;

        if (sz >= 0x100000) {
            sz /= MiB;
            sz_text = "MB";
        } else {
            sz /= KiB;
            sz_text = "KB";
        }

        for (ei = 0; ei < conf->way_size[wi]; ++ei) {
            const xtensa_tlb_entry *entry =
                xtensa_tlb_get_entry(env, dtlb, wi, ei);

            if (entry->asid) {
                static const char * const cache_text[8] = {
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
                };
                unsigned access = attr_to_access(entry->attr);
                unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
                    PAGE_CACHE_SHIFT;

                if (print_header) {
                    print_header = false;
                    qemu_printf("Way %u (%d %s)\n", wi, sz, sz_text);
                    qemu_printf("\tVaddr       Paddr       ASID  Attr RWX Cache\n"
                                "\t----------  ----------  ----  ---- --- -------\n");
                }
                qemu_printf("\t0x%08x  0x%08x  0x%02x  0x%02x %c%c%c %s\n",
                            entry->vaddr,
                            entry->paddr,
                            entry->asid,
                            entry->attr,
                            (access & PAGE_READ) ? 'R' : '-',
                            (access & PAGE_WRITE) ? 'W' : '-',
                            (access & PAGE_EXEC) ? 'X' : '-',
                            cache_text[cache_idx] ?
                            cache_text[cache_idx] : "Invalid");
            }
        }
    }
}

static void dump_mpu(CPUXtensaState *env,
                     const xtensa_mpu_entry *entry, unsigned n)
{
    unsigned i;

    qemu_printf("\t%s  Vaddr       Attr        Ring0  Ring1  System Type    CPU cache\n"
                "\t%s  ----------  ----------  -----  -----  -------------  ---------\n",
                env ? "En" : "  ",
                env ? "--" : "  ");

    for (i = 0; i < n; ++i) {
        uint32_t attr = entry[i].attr;
        unsigned access0 = mpu_attr_to_access(attr, 0);
        unsigned access1 = mpu_attr_to_access(attr, 1);
        unsigned type = mpu_attr_to_type(attr);
        char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' ';

        qemu_printf("\t %c  0x%08x  0x%08x   %c%c%c    %c%c%c   ",
                    env ?
                    ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ',
                    entry[i].vaddr, attr,
                    (access0 & PAGE_READ) ? 'R' : '-',
                    (access0 & PAGE_WRITE) ? 'W' : '-',
                    (access0 & PAGE_EXEC) ? 'X' : '-',
                    (access1 & PAGE_READ) ? 'R' : '-',
                    (access1 & PAGE_WRITE) ? 'W' : '-',
                    (access1 & PAGE_EXEC) ? 'X' : '-');

        switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) {
        case XTENSA_MPU_SYSTEM_TYPE_DEVICE:
            qemu_printf("Device %cB %3s\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_INT) ? "int" : "");
            break;
        case XTENSA_MPU_SYSTEM_TYPE_NC:
            qemu_printf("Sys NC %cB %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        case XTENSA_MPU_SYSTEM_TYPE_C:
            qemu_printf("Sys C %c%c%c %c%c%c\n",
                        (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-',
                        (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-',
                        (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache,
                        (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache);
            break;
        default:
            qemu_printf("Unknown\n");
            break;
        }
    }
}

void dump_mmu(CPUXtensaState *env)
{
    if (xtensa_option_bits_enabled(env->config,
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
                XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {

        qemu_printf("ITLB:\n");
        dump_tlb(env, false);
        qemu_printf("\nDTLB:\n");
        dump_tlb(env, true);
    } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) {
        qemu_printf("Foreground map:\n");
        dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments);
        qemu_printf("\nBackground map:\n");
        dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments);
    } else {
        qemu_printf("No TLB for this CPU core\n");
    }
}