/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0487C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

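/*
 * Helper for the AArch32 Neon VTBL/VTBX insns.  The 'desc' argument packs
 * the table description: bits [1:0] hold (number of table registers - 1)
 * and the remaining bits hold the first D-register index.  For example
 * (illustrative values only), desc == 0x09 describes a two-register table
 * starting at d2, so indexes 0..15 select bytes from d2/d3 and any larger
 * index takes the corresponding byte from 'def' instead.
 */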
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

/* Sign/zero extend */
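/*
 * sxtb16 extracts bytes 0 and 2 of the input and sign-extends each to a
 * halfword, e.g. sxtb16(0x00800080) == 0xff80ff80; uxtb16 below does the
 * same with zero-extension.
 */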
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
{
    /*
     * Take a division-by-zero exception if necessary; otherwise return
     * to get the usual non-trapping division behaviour (result of 0)
     */
    if (arm_feature(env, ARM_FEATURE_M)
        && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
        raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
    }
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;
    }
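    /*
     * INT_MIN / -1 overflows (undefined behaviour in C); the Arm SDIV
     * result in that case is the truncated value, i.e. INT_MIN, which is
     * why that case is special-cased above.
     */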
    return num / den;
}

uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
{
    if (den == 0) {
        handle_possible_div0_trap(env, GETPC());
        return 0;
    }
    return num / den;
}

uint32_t HELPER(rbit)(uint32_t x)
{
    return revbit32(x);
}

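/*
 * For the signed saturating helpers below, overflow of a +/- b is detected
 * from the operand and result sign bits: add_setq only sets the sticky Q
 * flag and returns the wrapped result, while the *_saturate helpers also
 * clamp the result to INT32_MAX or INT32_MIN as appropriate.
 */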
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
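/*
 * do_ssat() clamps 'val' to the signed range [-2^shift, 2^shift - 1] and
 * sets QF when clamping occurs.  For example, with shift == 7 (an 8-bit
 * signed range) do_ssat(env, 300, 7) returns 127 and do_ssat(env, -300, 7)
 * returns 0xffffff80 (-128).
 */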
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
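/*
 * do_usat() likewise clamps 'val' to the unsigned range [0, 2^shift - 1],
 * e.g. do_usat(env, 300, 8) == 255 and do_usat(env, -5, 8) == 0, setting
 * QF whenever clamping happens.
 */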
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/*
 * Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 * For a trap, *excp is updated with the EXCP_* trap type to use.
 */
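/*
 * The checks below run in priority order: an EL0 trap via SCTLR.nTWE/nTWI
 * is taken first, then an EL2 trap via HCR_EL2.TWE/TWI, and finally an
 * EL3 trap via SCR_EL3.TWE/TWI (reported as a Monitor trap if EL3 is
 * AArch32).
 */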
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe, uint32_t *excp)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    *excp = EXCP_UDEF;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (!(arm_sctlr(env, cur_el) & mask)) {
            return exception_target_el(env);
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (arm_feature(env, ARM_FEATURE_V8) && !arm_is_el3_or_mon(env)) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            if (!arm_el_is_aa64(env, 3)) {
                *excp = EXCP_MON_TRAP;
            }
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    uint32_t excp;
    int target_el = check_wfx_trap(env, false, &excp);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, excp, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfit)(CPUARMState *env, uint64_t timeout)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFIT, like WFI, is technically permitted in the user-mode emulator
     * but is not something any real-world code would do. AArch64 Linux
     * kernels trap WFI via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it a NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    ARMCPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t excp;
    int target_el = check_wfx_trap(env, false, &excp);
    /* The WFIT should time out when CNTVCT_EL0 >= the specified value. */
    uint64_t cntval = gt_get_countervalue(env);
    /*
     * We want the value that we would get if we read CNTVCT_EL0 from
     * the current exception level, so the direct_access offset, not
     * the indirect_access one. Compare the pseudocode LocalTimeoutEvent(),
     * which calls VirtualCounterTimer().
     */
    uint64_t offset = gt_direct_access_timer_offset(env, GTIMER_VIRT);
    uint64_t cntvct = cntval - offset;
    uint64_t nexttick;

    if (cpu_has_work(cs) || cntvct >= timeout) {
        /*
         * Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, excp, syn_wfx(1, 0xe, 0, false), target_el);
    }

    if (uadd64_overflow(timeout, offset, &nexttick)) {
        nexttick = UINT64_MAX;
    }
    if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) {
        /*
         * If the timeout is too long for the signed 64-bit range
         * of a QEMUTimer, let it expire early.
         */
        timer_mod_ns(cpu->wfxt_timer, INT64_MAX);
    } else {
        timer_mod(cpu->wfxt_timer, nexttick);
    }
    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    bql_lock();
    arm_call_pre_el_change_hook(env_archcpu(env));
    bql_unlock();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    bql_lock();
    arm_call_el_change_hook(env_archcpu(env));
    bql_unlock();
}

/* Access to user mode registers from privileged modes.  */
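/*
 * In the privileged AArch32 modes, SP (r13) and LR (r14) are banked per
 * mode and r8-r12 are additionally banked in FIQ mode, so when those
 * registers are banked away the user-mode copies accessed here live in
 * banked_r13/banked_r14/usr_regs rather than in env->regs[].
 */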
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /*
         * Handle Hyp target regs first because some are special cases
         * which don't want the usual "not accessible from tgtmode" check.
         */
        switch (regno) {
        case 16 ... 17: /* ELR_Hyp, SPSR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        case 13:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            g_assert_not_reached();
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
            /* Only happens for SPSR_Hyp access in Hyp mode */
            env->spsr = value;
        } else {
            env->banked_spsr[bank_number(tgtmode)] = value;
        }
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        if (tgtmode == (env->uncached_cpsr & CPSR_M)) {
            /* Only happens for SPSR_Hyp access in Hyp mode */
            return env->spsr;
        } else {
            return env->banked_spsr[bank_number(tgtmode)];
        }
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

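/*
 * Check whether translated code may access the coprocessor/system register
 * described by 'key'.  On success the ARMCPRegInfo pointer is returned for
 * use by the subsequent read/write helper; on failure the appropriate
 * exception is raised and this helper does not return.
 */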
const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;
    uint32_t excp;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_UNDEFINED;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) < 2) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool nxs = FIELD_EX32(ri->fgt, FGT, NXS);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        if (nxs && (arm_hcrx_el2_eff(env) & HCRX_FGTNXS)) {
            /*
             * If HCRX_EL2.FGTnXS is 1 then the fine-grained trap for
             * TLBI maintenance insns does *not* apply to the nXS variant.
             */
            trapbit = 0;
        } else {
            trapbit = extract64(trapword, bitpos, 1);
        }
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    excp = EXCP_UDEF;
    switch (res) {
        /* CP_ACCESS_TRAP* traps are always direct to a specified EL */
    case CP_ACCESS_TRAP_EL3:
        /*
         * If EL3 is AArch32 then there's no syndrome register; the cases
         * where we would raise a SystemAccessTrap to AArch64 EL3 all become
         * raising a Monitor trap exception. (Because there's no visible
         * syndrome it doesn't matter what we pass to raise_exception().)
         */
        if (!arm_el_is_aa64(env, 3)) {
            excp = EXCP_MON_TRAP;
        }
        break;
    case CP_ACCESS_TRAP_EL2:
    case CP_ACCESS_TRAP_EL1:
        break;
    case CP_ACCESS_UNDEFINED:
        /* CP_ACCESS_UNDEFINED is never direct to a specified EL */
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 1:
        assert(arm_current_el(env) < 2);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, excp, syndrome, target_el);
}

const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}

/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el;

    switch (mmu_idx) {
    case ARMMMUIdx_E20_0:
        target_el = 2;
        break;
    case ARMMMUIdx_E30_0:
        target_el = 3;
        break;
    default:
        target_el = 1;
        break;
    }

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}

void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        ri->writefn(env, ri, value);
        bql_unlock();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        res = ri->readfn(env, ri);
        bql_unlock();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        ri->writefn(env, ri, value);
        bql_unlock();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        bql_lock();
        res = ri->readfn(env, ri);
        bql_unlock();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef or trap[1]    Undef insn
     *
     * [1] In this case:
     *  - if HCR_EL2.NV == 1 we must trap to EL2
     *  - if HCR_EL2.NV == 0 then newer architecture revisions permit
     *    AArch64 (but not AArch32) to trap to EL2 as an IMPDEF choice
     *  - otherwise we must UNDEF
     * We take the IMPDEF choice to always UNDEF if HCR_EL2.NV == 0.
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        !(arm_hcr_el2_eff(env) & HCR_NV) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /*
         * If we have no EL3 then traditionally SMC always UNDEFs and can't be
         * trapped to EL2. For nested virtualization, SMC can be trapped to
         * the outer hypervisor. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

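/*
 * The *_cc shift helpers implement the Arm register-controlled shifts,
 * which use only the bottom byte of the shift amount: the result follows
 * the architected behaviour for shifts of 32 or more, and CF is set to
 * the last bit shifted out (left unchanged when the shift amount is zero).
 */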
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

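/*
 * Probe a guest virtual address range for the given access type so that
 * any fault is raised now rather than on the eventual access.  A range
 * that crosses a page boundary is probed as two pieces, one per page,
 * because each individual probe must stay within a single page.
 */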
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff,
     * and will return HCR_EL2.VSE == 0, so nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}
1284