/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"

void xtensa_cpu_do_unaligned_access(CPUState *cs,
        vaddr addr, MMUAccessType access_type,
        int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr);
        HELPER(exception_cause_vaddr)(env,
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
            &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
                                     bool is_write, bool is_exec, int opaque,
                                     unsigned size)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    HELPER(exception_cause_vaddr)(env, env->pc,
                                  is_exec ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  is_exec ? addr : cs->mem_io_vaddr);
}

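/*
 * Invalidate any translated code generated from the physical page that
 * backs vaddr. The address is translated without updating the TLB, using
 * access type 2 (a code fetch) at ring 0; if the translation fails the
 * request is silently ignored.
 */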
static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
            &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr);
    }
}

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
        uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

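/*
 * NSA/NSAU: normalization shift amount.
 * NSA counts the redundant copies of the sign bit, e.g. NSA(1) = 30,
 * NSA(0x40000000) = 0, and NSA(0) = NSA(-1) = 31.
 * NSAU counts the leading zeroes of an unsigned value, e.g. NSAU(1) = 31,
 * NSAU(0x80000000) = 0, and NSAU(0) = 32 by definition.
 */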
uint32_t HELPER(nsa)(uint32_t v)
{
    if (v & 0x80000000) {
        v = ~v;
    }
    return v ? clz32(v) - 1 : 31;
}

uint32_t HELPER(nsau)(uint32_t v)
{
    return v ? clz32(v) : 32;
}

static void copy_window_from_phys(CPUXtensaState *env,
                                  uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
               n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
               (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
                                  uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
               n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
               (n - n1) * sizeof(uint32_t));
    }
}


static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

static void rotate_window(CPUXtensaState *env, uint32_t delta)
{
    rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    rotate_window_abs(env, v);
}

void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
                      pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        uint32_t windowstart = xtensa_replicate_windowstart(env) >>
            (env->sregs[WINDOW_BASE] + 1);

        if (windowstart & ((1 << callinc) - 1)) {
            HELPER(window_check)(env, pc, callinc);
        }
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
        rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}

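/*
 * Window overflow check: called when registers that still belong to a live
 * caller frame are about to be clobbered. Rotates the window up to the first
 * live frame above WINDOW_BASE, saves the old base in PS.OWB, sets PS.EXCM,
 * latches pc into EPC1 and raises WindowOverflow4/8/12 depending on the
 * distance to the next live WINDOW_START bit.
 */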
void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
        ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    rotate_window(env, imm4);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
         (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

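/*
 * LBEG/LEND writers. The zero-overhead loop back edge is resolved at
 * translation time, so changing either register invalidates the translated
 * code at LEND - 1 (for LEND, both the old and the new location).
 */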
void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);
    check_interrupts(env);
    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    cpu->halted = 1;
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
        xtensa_rearm_ccompare_timer(env);
    }
    HELPER(exception)(env, EXCP_HLT);
}

void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active)
{
    xtensa_timer_irq(env, id, active);
}

void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d)
{
    xtensa_advance_ccount(env, d);
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    check_interrupts(env);
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    get_page_addr_code(env, vaddr);
}

/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
        (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                    LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

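/*
 * RASID holds the four ring ASIDs, one per byte. The ring 0 ASID is
 * architecturally fixed at 1, so the low byte is forced on every write,
 * and any effective change flushes the whole QEMU TLB.
 */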
void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu), 1);
    }
}

static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

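/*
 * Decode a TLB entry specifier (as used by the xTLB opcodes) into way and
 * entry index and return a pointer to the selected entry; the way is also
 * reported through pwi when it is non-NULL.
 */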
static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
        uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

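/*
 * Fill an MMU TLB entry from a PTE: the physical address is the PTE masked
 * by the way's address mask, the ASID is the RASID byte selected by the PTE
 * ring field (bits 5..4), and the low four PTE bits become the attributes.
 */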
void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
        xtensa_tlb_entry *entry, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}


void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
        uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
            flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

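/*
 * DBREAKA/DBREAKC writers: whenever the break address or the relevant
 * control bits (the load/store enables or the address mask) actually
 * change, the corresponding QEMU watchpoint is reinstalled or removed.
 */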
void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
        env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}

void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
                          &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
                          &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
            float32_scalbn(v, scale, &fp_status), &fp_status);
}

uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

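/*
 * FP comparison helpers for the boolean registers: each evaluates a quiet
 * float32 comparison and sets or clears the bit(s) passed in br within the
 * BR special register. Ordered predicates are false when either operand is
 * a NaN, unordered predicates are true.
 */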
static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}