/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
#include "fpu/softfloat.h"

void xtensa_cpu_do_unaligned_access(CPUState *cs,
                                    vaddr addr, MMUAccessType access_type,
                                    int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
        !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr);
        HELPER(exception_cause_vaddr)(env,
                                      env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
                                       &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

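/*
 * Bus error hook: an access to an unassigned physical address raises an
 * instruction or load/store PIF address error.  For data accesses only the
 * physical address is passed in, so the faulting virtual address is taken
 * from cs->mem_io_vaddr.
 */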
void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
                                     bool is_write, bool is_exec, int opaque,
                                     unsigned size)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    HELPER(exception_cause_vaddr)(env, env->pc,
                                  is_exec ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  is_exec ? addr : cs->mem_io_vaddr);
}

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
                                       &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr);
    }
}

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
                                   uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

static void copy_window_from_phys(CPUXtensaState *env,
                                  uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
               n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
               (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
                                  uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
               n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
               n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
               (n - n1) * sizeof(uint32_t));
    }
}


static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

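/*
 * Counterpart of xtensa_sync_window_from_phys(): write the current
 * 16-register window view (env->regs) back into the physical register file
 * at offset WINDOW_BASE * 4.
 */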
void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

static void rotate_window(CPUXtensaState *env, uint32_t delta)
{
    rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    rotate_window_abs(env, v);
}

void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
                      pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        uint32_t windowstart = xtensa_replicate_windowstart(env) >>
            (env->sregs[WINDOW_BASE] + 1);

        if (windowstart & ((1 << callinc) - 1)) {
            HELPER(window_check)(env, pc, callinc);
        }
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
        rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}

void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

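/*
 * RETW helper: validate the return, rotate the window back by the caller's
 * increment encoded in a0[31:30], then either clear the returning frame's
 * WINDOW_START bit or, if the caller's frame is no longer live, raise the
 * matching WindowUnderflow{4,8,12} exception.
 */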
uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
        ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    rotate_window(env, imm4);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
         (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
          windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}

void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    get_page_addr_code(env, vaddr);
}

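/*
 * ATOMCTL layout as used below: bits [1:0], [3:2] and [5:4] select the
 * S32C1I behaviour for bypass, writethrough and writeback accesses
 * respectively; a field value of 0 makes the access raise LoadStoreError.
 */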
/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
        (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                                          LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                                      LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            /* clamp the "use ways" field to the number of configured ways */
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
                              uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                     env->config->dtlb.nrefillentries :
                     env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
                                 uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
                                       uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
                              xtensa_tlb_entry *entry, bool dtlb,
                              unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
                          unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                                  XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}


void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
                       uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
                              flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

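/*
 * DBREAKA write: if the corresponding DBREAKC currently enables the break
 * (SB or LB set), reprogram the QEMU watchpoint for the new address.
 */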
void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
        env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}

void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
                          &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
                          &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
        float32_scalbn(v, scale, &fp_status), &fp_status);
}

uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
                          (int32_t)scale, &env->fp_status);
}

static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}

uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
    return address_space_ldl(env->address_space_er, addr,
                             MEMTXATTRS_UNSPECIFIED, NULL);
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
    address_space_stl(env->address_space_er, addr, data,
                      MEMTXATTRS_UNSPECIFIED, NULL);
}