/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

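/*
 * Return the exception level to which an exception from the current
 * state is routed by default: the current EL, but never below EL1,
 * and EL3 rather than EL1 when we are Secure with an AArch32 EL3
 * (there is no Secure EL1 in that configuration).
 */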
int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

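/*
 * Table lookup for Neon VTBL/VTBX: each byte of ireg indexes into a
 * table of ((desc & 3) + 1) D registers starting at register (desc >> 2);
 * bytes with out-of-range indexes are taken from def instead.
 */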
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

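/*
 * 32-bit saturating arithmetic helpers: each sets QF when the operation
 * overflows; all except add_setq also saturate the result.
 */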
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

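/*
 * SETEND: flip the CPSR.E (data endianness) bit and rebuild the
 * cached hflags.
 */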
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0, and Secure PL1 (the trap target) is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

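/*
 * Read the CPSR from translated code, with the execution state bits
 * masked out.
 */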
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

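/*
 * Access the banked r13 (SP) of a given CPU mode: use the live register
 * if the requested mode is the current one, otherwise the banked copy.
 */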
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

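/*
 * MSR (banked) / MRS (banked): write or read a register belonging to
 * another mode, after checking that the access is permitted from the
 * current mode.
 */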
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

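/*
 * Runtime access check for a system register access from translated code:
 * return the ARMCPRegInfo on success, or raise the appropriate exception
 * if the access is trapped or otherwise not permitted.
 */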
const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_TRAP;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        trapbit = extract64(trapword, bitpos, 1);
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    switch (res & ~CP_ACCESS_EL_MASK) {
    case CP_ACCESS_TRAP:
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* Only CP_ACCESS_TRAP traps are direct to a specified EL */
        assert((res & CP_ACCESS_EL_MASK) == 0);
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        /* No "direct" traps to EL1 */
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

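/* Look up the ARMCPRegInfo for a register key; the register must exist. */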
const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}

/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}

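/*
 * Read and write system registers via their readfn/writefn, taking the
 * iothread lock around registers marked ARM_CP_IO.
 */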
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

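/*
 * Runtime checks for HVC: decide whether the instruction should UNDEF
 * in the current state, unless it is a valid PSCI call, which takes
 * precedence.
 */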
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}


/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

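/* Variable shift left, computing the carry out into CF. */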
986 {
987     int shift = i & 0xff;
988     if (shift >= 32) {
989         if (shift == 32)
990             env->CF = x & 1;
991         else
992             env->CF = 0;
993         return 0;
994     } else if (shift != 0) {
995         env->CF = (x >> (32 - shift)) & 1;
996         return x << shift;
997     }
998     return x;
999 }
1000 
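/* Variable logical shift right, computing the carry out into CF. */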
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

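/* Variable arithmetic shift right, computing the carry out into CF. */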
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

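/* Variable rotate right, computing the carry out into CF. */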
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

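/*
 * Probe a range of guest memory for the given access type, splitting the
 * probe at a page boundary so that a fault on either page is taken here.
 */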
void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_MASK);
    uintptr_t ra = GETPC();

    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff,
     * and will return HCR_EL2.VSE == 0, so nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}