xref: /openbmc/qemu/target/xtensa/op_helper.c (revision 46d0885a)
/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
#include "fpu/softfloat.h"

#ifndef CONFIG_USER_ONLY

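/*
 * Unaligned access hook: raise a LoadStoreAlignment exception at the
 * faulting PC, but only when the core is configured with the unaligned
 * exception option and without hardware alignment.
 */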
void xtensa_cpu_do_unaligned_access(CPUState *cs,
        vaddr addr, MMUAccessType access_type,
        int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
            !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr, true);
        HELPER(exception_cause_vaddr)(env,
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

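/*
 * Softmmu TLB miss handler: translate the virtual address and either
 * install the mapping in the QEMU TLB or raise the exception cause
 * returned by xtensa_get_physical_addr() at the guest PC.
 */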
void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
            &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr, true);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
                                     bool is_write, bool is_exec, int opaque,
                                     unsigned size)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    HELPER(exception_cause_vaddr)(env, env->pc,
                                  is_exec ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  is_exec ? addr : cs->mem_io_vaddr);
}

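/*
 * Invalidate translated code at the physical address that vaddr maps to;
 * silently does nothing when the address does not translate.
 */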
static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
            &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr,
                                MEMTXATTRS_UNSPECIFIED);
    }
}

#else

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    tb_invalidate_phys_addr(vaddr);
}

#endif

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

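/*
 * Raise a general exception with the given cause: record the faulting PC
 * in EPC1 (or DEPC for a double exception, when the core has one), set
 * EXCCAUSE and PS.EXCM, and dispatch to the double, kernel or user vector
 * depending on PS.EXCM and PS.UM.
 */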
void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
        uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

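/*
 * Copy n registers from the physical register file into the live window
 * (and the other way round in copy_phys_from_window below), wrapping
 * around at the end of the physical file when the window straddles it.
 */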
static void copy_window_from_phys(CPUXtensaState *env,
        uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
                n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
                (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
        uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
                n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
                (n - n1) * sizeof(uint32_t));
    }
}


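/*
 * WINDOW_BASE counts 4-register panes and wraps modulo NAREG / 4;
 * windowstart_bit() returns the WINDOW_START bit for a given pane.
 */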
static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
{
    xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    xtensa_rotate_window_abs(env, v);
}

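/*
 * ENTRY instruction: allocate a new register window. The caller's AR[s],
 * decremented by imm, becomes the callee's AR[s], the window rotates by
 * the CALLINC recorded in PS, and the new frame is marked in WINDOW_START.
 * Raises IllegalInstruction when s > 3 or when PS.WOE/PS.EXCM do not
 * permit windowed calls, and takes a window overflow first if the target
 * registers still belong to live frames.
 */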
void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
                      pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        uint32_t windowstart = xtensa_replicate_windowstart(env) >>
            (env->sregs[WINDOW_BASE] + 1);

        if (windowstart & ((1 << callinc) - 1)) {
            HELPER(window_check)(env, pc, callinc);
        }
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
        xtensa_rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}

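/*
 * Window overflow: rotate to the oldest live frame above the current one,
 * save the old WINDOW_BASE in PS.OWB, set PS.EXCM and EPC1, and raise
 * WindowOverflow4/8/12 according to the call increment of the frame that
 * must be spilled.
 */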
void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    xtensa_rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

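/*
 * RETW/RETW.N: compute the return address from a0 within the caller's
 * 1GB region, rotate the window back by the call increment encoded in
 * a0[31:30], and either retire the current frame from WINDOW_START or
 * raise WindowUnderflow4/8/12 when the caller's frame has been spilled.
 * An inconsistent call increment or PS raises IllegalInstruction.
 */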
uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
            ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        xtensa_rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    xtensa_rotate_window(env, imm4);
}

void xtensa_restore_owb(CPUXtensaState *env)
{
    xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    xtensa_restore_owb(env);
}

void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
            (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

#ifndef CONFIG_USER_ONLY

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

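/*
 * Recompute CCOUNT from the virtual clock: CCOUNT advances at
 * clock_freq_khz relative to time_base, on top of ccount_base.
 */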
void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}

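/*
 * Rearm the CCOMPARE[i] timer for the next CCOUNT match; the 32-bit
 * wrap-around arithmetic keeps the delta in the range 1..2^32 ticks.
 */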
void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result but
     * only the side-effects (ie any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
            (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                    LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

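/*
 * Write MEMCTL: clamp the I/D cache "use ways" and "allocatable ways"
 * fields to what the configured caches actually provide, then mask the
 * value with the per-core MEMCTL mask.
 */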
void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
        uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
        xtensa_tlb_entry *entry, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}


void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

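/*
 * Map DBREAKA/DBREAKC onto a QEMU CPU watchpoint: DBREAKC selects load
 * and/or store breaks and supplies the address mask; a non-contiguous
 * mask is truncated after its first zero bit with a guest-error warning.
 */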
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
        uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
            flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
            env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}
#endif

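/*
 * Write FCR: keep only the defined bits and propagate the RM field
 * (bits 1:0) to the softfloat rounding mode.
 */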
void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
            &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
            &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
            float32_scalbn(v, scale, &fp_status), &fp_status);
}

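/*
 * Float to unsigned conversion with scaling. Negative (non-NaN) inputs
 * take the signed conversion path, so they yield the two's-complement /
 * INT32-saturated result rather than the unsigned saturation to 0.
 */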
uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}

uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    return address_space_ldl(env->address_space_er, addr,
                             MEMTXATTRS_UNSPECIFIED, NULL);
#else
    return 0;
#endif
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    address_space_stl(env->address_space_er, addr, data,
                      MEMTXATTRS_UNSPECIFIED, NULL);
#endif
}
1067