xref: /openbmc/qemu/target/xtensa/op_helper.c (revision 506e4a00)
/*
 * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/address-spaces.h"
#include "qemu/timer.h"
#include "fpu/softfloat.h"

#ifndef CONFIG_USER_ONLY

void xtensa_cpu_do_unaligned_access(CPUState *cs,
        vaddr addr, MMUAccessType access_type,
        int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
            !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
        cpu_restore_state(CPU(cpu), retaddr, true);
        HELPER(exception_cause_vaddr)(env,
                env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
    }
}

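/*
 * Fill a QEMU TLB entry for the access that just missed: translate the
 * virtual address with xtensa_get_physical_addr() and either install the
 * mapping via tlb_set_page() or, on failure, turn the returned cause code
 * into a guest exception at the faulting instruction.
 */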
void tlb_fill(CPUState *cs, target_ulong vaddr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
            &paddr, &page_size, &access);

    qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
                  __func__, vaddr, access_type, mmu_idx, paddr, ret);

    if (ret == 0) {
        tlb_set_page(cs,
                     vaddr & TARGET_PAGE_MASK,
                     paddr & TARGET_PAGE_MASK,
                     access, mmu_idx, page_size);
    } else {
        cpu_restore_state(cs, retaddr, true);
        HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
    }
}

void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
                                      unsigned size, MMUAccessType access_type,
                                      int mmu_idx, MemTxAttrs attrs,
                                      MemTxResult response, uintptr_t retaddr)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    cpu_restore_state(cs, retaddr, true);
    HELPER(exception_cause_vaddr)(env, env->pc,
                                  access_type == MMU_INST_FETCH ?
                                  INSTR_PIF_ADDR_ERROR_CAUSE :
                                  LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
                                  addr);
}

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    uint32_t paddr;
    uint32_t page_size;
    unsigned access;
    int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
            &paddr, &page_size, &access);
    if (ret == 0) {
        tb_invalidate_phys_addr(&address_space_memory, paddr,
                                MEMTXATTRS_UNSPECIFIED);
    }
}

#else

static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
{
    tb_invalidate_phys_addr(vaddr);
}

#endif

void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
        uint32_t pc, uint32_t cause, uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

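/*
 * The physical AR register file (env->phys_regs) is treated as a circular
 * buffer of env->config->nareg registers.  The two helpers below copy a run
 * of n registers between it and the current 16-register window (env->regs),
 * splitting the memcpy in two when the run wraps past the end of the
 * physical file.
 */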
static void copy_window_from_phys(CPUXtensaState *env,
        uint32_t window, uint32_t phys, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->regs + window, env->phys_regs + phys,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->regs + window, env->phys_regs + phys,
                n1 * sizeof(uint32_t));
        memcpy(env->regs + window + n1, env->phys_regs,
                (n - n1) * sizeof(uint32_t));
    }
}

static void copy_phys_from_window(CPUXtensaState *env,
        uint32_t phys, uint32_t window, uint32_t n)
{
    assert(phys < env->config->nareg);
    if (phys + n <= env->config->nareg) {
        memcpy(env->phys_regs + phys, env->regs + window,
                n * sizeof(uint32_t));
    } else {
        uint32_t n1 = env->config->nareg - phys;
        memcpy(env->phys_regs + phys, env->regs + window,
                n1 * sizeof(uint32_t));
        memcpy(env->phys_regs, env->regs + window + n1,
                (n - n1) * sizeof(uint32_t));
    }
}


static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
{
    return a & (env->config->nareg / 4 - 1);
}

static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
{
    return 1 << windowbase_bound(a, env);
}

void xtensa_sync_window_from_phys(CPUXtensaState *env)
{
    copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
}

void xtensa_sync_phys_from_window(CPUXtensaState *env)
{
    copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
}

static void xtensa_rotate_window_abs(CPUXtensaState *env, uint32_t position)
{
    xtensa_sync_phys_from_window(env);
    env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
    xtensa_sync_window_from_phys(env);
}

void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta)
{
    xtensa_rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
}

void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
{
    xtensa_rotate_window_abs(env, v);
}

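/*
 * ENTRY instruction helper: check that the windowed ABI invariants hold
 * (s must name a0..a3 and PS.WOE must be set with PS.EXCM clear), raise a
 * window overflow if the frames about to be occupied are still live, then
 * write the decremented stack pointer into the callee's register and rotate
 * the window by the pending call increment.
 */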
void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
{
    int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
    if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Illegal entry instruction (pc = %08x), PS = %08x\n",
                      pc, env->sregs[PS]);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        uint32_t windowstart = xtensa_replicate_windowstart(env) >>
            (env->sregs[WINDOW_BASE] + 1);

        if (windowstart & ((1 << callinc) - 1)) {
            HELPER(window_check)(env, pc, callinc);
        }
        env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - imm;
        xtensa_rotate_window(env, callinc);
        env->sregs[WINDOW_START] |=
            windowstart_bit(env->sregs[WINDOW_BASE], env);
    }
}

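/*
 * Window overflow handling: rotate to the closest live frame above the
 * current window, save the original WINDOW_BASE in PS.OWB, and raise
 * Overflow4/8/12 depending on how far away the next live frame after that
 * one is (i.e. on the call increment that created the frame being spilled).
 */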
void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
{
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = xtensa_replicate_windowstart(env) >>
        (env->sregs[WINDOW_BASE] + 1);
    uint32_t n = ctz32(windowstart) + 1;

    assert(n <= w);

    xtensa_rotate_window(env, n);
    env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
        (windowbase << PS_OWB_SHIFT) | PS_EXCM;
    env->sregs[EPC1] = env->pc = pc;

    switch (ctz32(windowstart >> n)) {
    case 0:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
        break;
    case 1:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
        break;
    default:
        HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
        break;
    }
}

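/*
 * RETW helper: the window increment n of the matching call comes from
 * bits 31:30 of the return address in a0, and m is the position of the
 * caller's WINDOW_START bit among the three frames below the current one.
 * Returns the full return PC (upper two bits taken from the current PC),
 * or raises an illegal-instruction or window-underflow exception.
 */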
uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
{
    int n = (env->regs[0] >> 30) & 0x3;
    int m = 0;
    uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
    uint32_t windowstart = env->sregs[WINDOW_START];
    uint32_t ret_pc = 0;

    if (windowstart & windowstart_bit(windowbase - 1, env)) {
        m = 1;
    } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
        m = 2;
    } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
        m = 3;
    }

    if (n == 0 || (m != 0 && m != n) ||
            ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction (pc = %08x), "
                      "PS = %08x, m = %d, n = %d\n",
                      pc, env->sregs[PS], m, n);
        HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
    } else {
        int owb = windowbase;

        ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);

        xtensa_rotate_window(env, -n);
        if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
            env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
        } else {
            /* window underflow */
            env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
                (windowbase << PS_OWB_SHIFT) | PS_EXCM;
            env->sregs[EPC1] = env->pc = pc;

            if (n == 1) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
            } else if (n == 2) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
            } else if (n == 3) {
                HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
            }
        }
    }
    return ret_pc;
}

void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
{
    xtensa_rotate_window(env, imm4);
}

void xtensa_restore_owb(CPUXtensaState *env)
{
    xtensa_rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
}

void HELPER(restore_owb)(CPUXtensaState *env)
{
    xtensa_restore_owb(env);
}

void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
{
    if ((env->sregs[WINDOW_START] &
            (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
             windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
        HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
    }
}

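/*
 * Zero-overhead loop parameters are folded into the translated code of the
 * instruction that ends at LEND, so whenever LBEG or LEND changes, the TB
 * covering the old (and, for LEND, the new) loop end must be invalidated
 * and retranslated.
 */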
void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LBEG] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LBEG] = v;
    }
}

void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
{
    if (env->sregs[LEND] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
        env->sregs[LEND] = v;
        tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
    }
}

void HELPER(dump_state)(CPUXtensaState *env)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
}

#ifndef CONFIG_USER_ONLY

void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }

    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

void HELPER(update_ccount)(CPUXtensaState *env)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    env->ccount_time = now;
    env->sregs[CCOUNT] = env->ccount_base +
        (uint32_t)((now - env->time_base) *
                   env->config->clock_freq_khz / 1000000);
}

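/*
 * Writing CCOUNT only shifts ccount_base (the delta applied on top of the
 * scaled QEMU_CLOCK_VIRTUAL time); every CCOMPARE timer is then
 * reprogrammed so that pending timer interrupts keep firing at the right
 * guest cycle.
 */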
void HELPER(wsr_ccount)(CPUXtensaState *env, uint32_t v)
{
    int i;

    HELPER(update_ccount)(env);
    env->ccount_base += v - env->sregs[CCOUNT];
    for (i = 0; i < env->config->nccompare; ++i) {
        HELPER(update_ccompare)(env, i);
    }
}

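/*
 * The cycle delta below is computed modulo 2^32 and biased by one so that
 * it falls in the range [1, 2^32]: a CCOMPARE value equal to the current
 * CCOUNT schedules the timer a full wrap of the counter ahead instead of
 * firing immediately.
 */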
void HELPER(update_ccompare)(CPUXtensaState *env, uint32_t i)
{
    uint64_t dcc;

    HELPER(update_ccount)(env);
    dcc = (uint64_t)(env->sregs[CCOMPARE + i] - env->sregs[CCOUNT] - 1) + 1;
    timer_mod(env->ccompare[i].timer,
              env->ccount_time + (dcc * 1000000) / env->config->clock_freq_khz);
    env->yield_needed = 1;
}

void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
{
    /*
     * Attempt the memory load; we don't care about the result,
     * only about the side effects (i.e. any MMU or other exception)
     */
    cpu_ldub_code_ra(env, vaddr, GETPC());
}

/*!
 * Check vaddr accessibility/cache attributes and raise an exception if
 * specified by the ATOMCTL SR.
 *
 * Note: local memory exclusion is not implemented
 */
void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
{
    uint32_t paddr, page_size, access;
    uint32_t atomctl = env->sregs[ATOMCTL];
    int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
            xtensa_get_cring(env), &paddr, &page_size, &access);

    /*
     * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
     * see opcode description in the ISA
     */
    if (rc == 0 &&
            (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
        rc = STORE_PROHIBITED_CAUSE;
    }

    if (rc) {
        HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
    }

    /*
     * When data cache is not configured use ATOMCTL bypass field.
     * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
     * under the Conditional Store Option.
     */
    if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        access = PAGE_CACHE_BYPASS;
    }

    switch (access & PAGE_CACHE_MASK) {
    case PAGE_CACHE_WB:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_WT:
        atomctl >>= 2;
        /* fall through */
    case PAGE_CACHE_BYPASS:
        if ((atomctl & 0x3) == 0) {
            HELPER(exception_cause_vaddr)(env, pc,
                    LOAD_STORE_ERROR_CAUSE, vaddr);
        }
        break;

    case PAGE_CACHE_ISOLATE:
        HELPER(exception_cause_vaddr)(env, pc,
                LOAD_STORE_ERROR_CAUSE, vaddr);
        break;

    default:
        break;
    }
}

void HELPER(wsr_memctl)(CPUXtensaState *env, uint32_t v)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_ICACHE)) {
        if (extract32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN) >
            env->config->icache_ways) {
            v = deposit32(v, MEMCTL_IUSEWAYS_SHIFT, MEMCTL_IUSEWAYS_LEN,
                          env->config->icache_ways);
        }
    }
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
        if (extract32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DUSEWAYS_SHIFT, MEMCTL_DUSEWAYS_LEN,
                          env->config->dcache_ways);
        }
        if (extract32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN) >
            env->config->dcache_ways) {
            v = deposit32(v, MEMCTL_DALLOCWAYS_SHIFT, MEMCTL_DALLOCWAYS_LEN,
                          env->config->dcache_ways);
        }
    }
    env->sregs[MEMCTL] = v & env->config->memctl_mask;
}

void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);

    v = (v & 0xffffff00) | 0x1;
    if (v != env->sregs[RASID]) {
        env->sregs[RASID] = v;
        tlb_flush(CPU(cpu));
    }
}

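/*
 * For the variable-page-size TLB ways (4..6) the page size selection is
 * read from the corresponding ITLBCFG/DTLBCFG bit field; all other ways
 * report 0 here and their address masks are handled separately below.
 */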
static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];

    switch (way) {
    case 4:
        return (tlbcfg >> 16) & 0x3;

    case 5:
        return (tlbcfg >> 20) & 0x1;

    case 6:
        return (tlbcfg >> 24) & 0x1;

    default:
        return 0;
    }
}

/*!
 * Get bit mask for the virtual address bits translated by the TLB way
 */
uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        switch (way) {
        case 4:
            return 0xfff00000 << get_page_size(env, dtlb, way) * 2;

        case 5:
            if (varway56) {
                return 0xf8000000 << get_page_size(env, dtlb, way);
            } else {
                return 0xf8000000;
            }

        case 6:
            if (varway56) {
                return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
            } else {
                return 0xf0000000;
            }

        default:
            return 0xfffff000;
        }
    } else {
        return REGION_PAGE_MASK;
    }
}

/*!
 * Get bit mask for the 'VPN without index' field.
 * See ISA, 4.6.5.6, data format for RxTLB0
 */
static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
{
    if (way < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        return is32 ? 0xffff8000 : 0xffffc000;
    } else if (way == 4) {
        return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
    } else if (way <= 6) {
        uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
        bool varway56 = dtlb ?
            env->config->dtlb.varway56 :
            env->config->itlb.varway56;

        if (varway56) {
            return mask << (way == 5 ? 2 : 3);
        } else {
            return mask << 1;
        }
    } else {
        return 0xfffff000;
    }
}

/*!
 * Split virtual address into VPN (with index) and entry index
 * for the given TLB way
 */
void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t wi, uint32_t *ei)
{
    bool varway56 = dtlb ?
        env->config->dtlb.varway56 :
        env->config->itlb.varway56;

    if (!dtlb) {
        wi &= 7;
    }

    if (wi < 4) {
        bool is32 = (dtlb ?
                env->config->dtlb.nrefillentries :
                env->config->itlb.nrefillentries) == 32;
        *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
    } else {
        switch (wi) {
        case 4:
            {
                uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
                *ei = (v >> eibase) & 0x3;
            }
            break;

        case 5:
            if (varway56) {
                uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x3;
            } else {
                *ei = (v >> 27) & 0x1;
            }
            break;

        case 6:
            if (varway56) {
                uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
                *ei = (v >> eibase) & 0x7;
            } else {
                *ei = (v >> 28) & 0x1;
            }
            break;

        default:
            *ei = 0;
            break;
        }
    }
    *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
}

/*!
 * Split TLB address into TLB way, entry index and VPN (with index).
 * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
 */
static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
        uint32_t *vpn, uint32_t *wi, uint32_t *ei)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        *wi = v & (dtlb ? 0xf : 0x7);
        split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
    } else {
        *vpn = v & REGION_PAGE_MASK;
        *wi = 0;
        *ei = (v >> 29) & 0x7;
    }
}

static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
        uint32_t v, bool dtlb, uint32_t *pwi)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;

    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    if (pwi) {
        *pwi = wi;
    }
    return xtensa_tlb_get_entry(env, dtlb, wi, ei);
}

uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
    } else {
        return v & REGION_PAGE_MASK;
    }
}

uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
    return entry->paddr | entry->attr;
}

void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
        if (entry->variable && entry->asid) {
            tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
            entry->asid = 0;
        }
    }
}

uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
{
    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        uint32_t wi;
        uint32_t ei;
        uint8_t ring;
        int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);

        switch (res) {
        case 0:
            if (ring >= xtensa_get_ring(env)) {
                return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
            }
            break;

        case INST_TLB_MULTI_HIT_CAUSE:
        case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
            HELPER(exception_cause_vaddr)(env, env->pc, res, v);
            break;
        }
        return 0;
    } else {
        return (v & REGION_PAGE_MASK) | 0x1;
    }
}

void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
        xtensa_tlb_entry *entry, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    entry->vaddr = vpn;
    entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
    entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
    entry->attr = pte & 0xf;
}

void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
        unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
{
    XtensaCPU *cpu = xtensa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);

    if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
        if (entry->variable) {
            if (entry->asid) {
                tlb_flush_page(cs, entry->vaddr);
            }
            xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
            tlb_flush_page(cs, entry->vaddr);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
                          __func__, dtlb, wi, ei);
        }
    } else {
        tlb_flush_page(cs, entry->vaddr);
        if (xtensa_option_enabled(env->config,
                    XTENSA_OPTION_REGION_TRANSLATION)) {
            entry->paddr = pte & REGION_PAGE_MASK;
        }
        entry->attr = pte & 0xf;
    }
}

void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
{
    uint32_t vpn;
    uint32_t wi;
    uint32_t ei;
    split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
    xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
}


void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
{
    uint32_t change = v ^ env->sregs[IBREAKENABLE];
    unsigned i;

    for (i = 0; i < env->config->nibreak; ++i) {
        if (change & (1 << i)) {
            tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        }
    }
    env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
}

void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
        tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
        tb_invalidate_virtual_addr(env, v);
    }
    env->sregs[IBREAKA + i] = v;
}

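/*
 * Map a DBREAKA/DBREAKC pair onto a QEMU watchpoint.  The mask bits from
 * DBREAKC (extended with ones above DBREAKC_MASK) select which address
 * bits are compared; (~mask + 1) & ~mask is non-zero exactly when ~mask is
 * not of the form 2^k - 1, i.e. when the guest programmed a mask that is
 * not a contiguous run of leading ones.
 */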
static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
        uint32_t dbreakc)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));
    int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
    uint32_t mask = dbreakc | ~DBREAKC_MASK;

    if (env->cpu_watchpoint[i]) {
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
    }
    if (dbreakc & DBREAKC_SB) {
        flags |= BP_MEM_WRITE;
    }
    if (dbreakc & DBREAKC_LB) {
        flags |= BP_MEM_READ;
    }
    /* contiguous mask after inversion is one less than some power of 2 */
    if ((~mask + 1) & ~mask) {
        qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
        /* cut mask after the first zero bit */
        mask = 0xffffffff << (32 - clo32(mask));
    }
    if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
            flags, &env->cpu_watchpoint[i])) {
        env->cpu_watchpoint[i] = NULL;
        qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
                      dbreaka & mask, ~mask + 1);
    }
}

void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    uint32_t dbreakc = env->sregs[DBREAKC + i];

    if ((dbreakc & DBREAKC_SB_LB) &&
            env->sregs[DBREAKA + i] != v) {
        set_dbreak(env, i, v, dbreakc);
    }
    env->sregs[DBREAKA + i] = v;
}

void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
{
    if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
        if (v & DBREAKC_SB_LB) {
            set_dbreak(env, i, env->sregs[DBREAKA + i], v);
        } else {
            if (env->cpu_watchpoint[i]) {
                CPUState *cs = CPU(xtensa_env_get_cpu(env));

                cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
                env->cpu_watchpoint[i] = NULL;
            }
        }
    }
    env->sregs[DBREAKC + i] = v;
}
#endif

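/*
 * WUR to the FCR user register: bits cleared by the 0xfffff07f mask are
 * dropped on write, and the low two bits select the rounding mode used by
 * all subsequent softfloat operations through env->fp_status.
 */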
void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
{
    static const int rounding_mode[] = {
        float_round_nearest_even,
        float_round_to_zero,
        float_round_up,
        float_round_down,
    };

    env->uregs[FCR] = v & 0xfffff07f;
    set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
}

float32 HELPER(abs_s)(float32 v)
{
    return float32_abs(v);
}

float32 HELPER(neg_s)(float32 v)
{
    return float32_chs(v);
}

float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_add(a, b, &env->fp_status);
}

float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_sub(a, b, &env->fp_status);
}

float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
{
    return float32_mul(a, b, &env->fp_status);
}

float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, 0,
            &env->fp_status);
}

float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
{
    return float32_muladd(b, c, a, float_muladd_negate_product,
            &env->fp_status);
}

uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};

    set_float_rounding_mode(rounding_mode, &fp_status);
    return float32_to_int32(
            float32_scalbn(v, scale, &fp_status), &fp_status);
}

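/*
 * Unsigned float-to-int conversion: negative (non-NaN) inputs are routed
 * through the signed conversion so that the result wraps like a
 * two's-complement value instead of saturating to zero, which is what
 * float32_to_uint32() would do for them.
 */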
uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
{
    float_status fp_status = {0};
    float32 res;

    set_float_rounding_mode(rounding_mode, &fp_status);

    res = float32_scalbn(v, scale, &fp_status);

    if (float32_is_neg(v) && !float32_is_any_nan(v)) {
        return float32_to_int32(res, &fp_status);
    } else {
        return float32_to_uint32(res, &fp_status);
    }
}

float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(int32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
{
    return float32_scalbn(uint32_to_float32(v, &env->fp_status),
            (int32_t)scale, &env->fp_status);
}

static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
{
    if (v) {
        env->sregs[BR] |= br;
    } else {
        env->sregs[BR] &= ~br;
    }
}

void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
}

void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
}

void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
}

void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
}

void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v == float_relation_less || v == float_relation_unordered, br);
}

void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
}

void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
{
    int v = float32_compare_quiet(a, b, &env->fp_status);
    set_br(env, v != float_relation_greater, br);
}

uint32_t HELPER(rer)(CPUXtensaState *env, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    return address_space_ldl(env->address_space_er, addr,
                             MEMTXATTRS_UNSPECIFIED, NULL);
#else
    return 0;
#endif
}

void HELPER(wer)(CPUXtensaState *env, uint32_t data, uint32_t addr)
{
#ifndef CONFIG_USER_ONLY
    address_space_stl(env->address_space_er, addr, data,
                      MEMTXATTRS_UNSPECIFIED, NULL);
#endif
}
1069