/* xref: /openbmc/qemu/target/arm/tcg/op_helper.c (revision 6c7937ec) */
/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "cpu-features.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "cpregs.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

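/*
 * Return the default target EL for an exception: EL1 when running at
 * EL0, otherwise the current EL. If we are Secure and EL3 is AArch32
 * there is no Secure EL1, so exceptions that would go to EL1 are routed
 * to EL3 (Secure PL1) instead.
 */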
int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /*
     * restore_state_to_opc() will set env->exception.syndrome, so
     * we must restore CPU state here before setting the syndrome
     * the caller passed us, and cannot use cpu_loop_exit_restore().
     */
    cpu_restore_state(cs, ra);
    raise_exception(env, excp, syndrome, target_el);
}

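/*
 * VTBL/VTBX table lookup. 'desc' packs the operands: bits [1:0] hold the
 * number of table registers minus one, so maxindex is the table size in
 * bytes (8..32), and the remaining bits hold the first table register
 * number. Each result byte is the selected table byte, or the matching
 * byte of 'def' when the index is out of range ('def' is zero for VTBL
 * and the old destination value for VTBX).
 *
 * Worked example: desc = (5 << 2) | 1 describes the 16-byte table d5:d6;
 * an index byte of 9 selects byte 1 of d6, while an index byte of 20 is
 * out of range and takes the corresponding byte of 'def' instead.
 */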
uint64_t HELPER(neon_tbl)(CPUARMState *env, uint32_t desc,
                          uint64_t ireg, uint64_t def)
{
    uint64_t tmp, val = 0;
    uint32_t maxindex = ((desc & 3) + 1) * 8;
    uint32_t base_reg = desc >> 2;
    uint32_t shift, index, reg;

    for (shift = 0; shift < 64; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            reg = base_reg + (index >> 3);
            tmp = *aa32_vfp_dreg(env, reg);
            tmp = ((tmp >> ((index & 7) << 3)) & 0xff) << shift;
        } else {
            tmp = def & (0xffull << shift);
        }
        val |= tmp;
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use raise_exception_ra() so
         * that cpu_restore_state() will sort them out.
         */
        raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
    }
}

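/*
 * Signed overflow on a + b occurs when both operands have the same sign
 * and the result's sign differs, which is what the SIGNBIT tests below
 * check. On overflow the saturating forms return
 * ~(((int32_t)a >> 31) ^ SIGNBIT), i.e. 0x7fffffff when a is
 * non-negative and 0x80000000 when a is negative. For example
 * 0x7fffffff + 1 saturates to 0x7fffffff and 0x80000000 - 1 saturates
 * to 0x80000000, setting QF in both cases.
 */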
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

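/*
 * do_ssat() clamps val to the (shift + 1)-bit signed range
 * [-(1 << shift), (1 << shift) - 1]; do_usat() clamps it to the
 * shift-bit unsigned range [0, (1 << shift) - 1]. QF is set whenever
 * clamping happens. For example do_ssat(env, 0x123, 7) returns 0x7f and
 * do_ssat(env, -0x200, 7) returns 0xffffff80 (-0x80), both with QF set.
 */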
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

void HELPER(check_bxj_trap)(CPUARMState *env, uint32_t rm)
{
    /*
     * Only called if in NS EL0 or EL1 for a BXJ for a v7A CPU;
     * check if HSTR.TJDBX means we need to trap to EL2.
     */
    if (env->cp15.hstr_el2 & HSTR_TJDBX) {
        /*
         * We know the condition code check passed, so take the IMPDEF
         * choice to always report CV=1 COND 0xe
         */
        uint32_t syn = syn_bxjtrap(1, 0xe, rm);
        raise_exception_ra(env, EXCP_HYP_TRAP, syn, 2, GETPC());
    }
}

#ifndef CONFIG_USER_ONLY
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 are at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist then
     * its bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
#endif

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
#ifdef CONFIG_USER_ONLY
    /*
     * WFI in the user-mode emulator is technically permitted but not
     * something any real-world code would do. AArch64 Linux kernels
     * trap it via SCTLR_EL1.nTWI and make it an (expensive) NOP;
     * AArch32 kernels don't trap it so it will delay a bit.
     * For QEMU, make it NOP here, because trying to raise EXCP_HLT
     * would trigger an abort.
     */
    return;
#else
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

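    /*
     * The translated code has already advanced the PC past the WFI, but
     * a trapped WFx must report the address of the WFI instruction itself
     * as the preferred exception return address, so wind the PC back by
     * the instruction length before raising the exception.
     */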
    if (target_el) {
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
#endif
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome*).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome_el)(CPUARMState *env, uint32_t excp,
                                        uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/*
 * Raise an exception with the specified syndrome register value
 * to the default target el.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    raise_exception(env, excp, syndrome, exception_target_el(env));
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    uint32_t mask;

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
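/*
 * Only r8-r12 (banked in FIQ mode) and r13/r14 (banked in most other
 * modes) have separate user-mode copies; any other register is shared
 * with the current mode and is accessed via env->regs[] directly.
 */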
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

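/*
 * Perform the run-time access checks for a system register access from
 * translated code: the XScale CPAR check, the register's own accessfn,
 * HSTR_EL2 traps from EL0 and fine-grained traps. On success, return the
 * ARMCPRegInfo pointer so the caller can perform the access; on failure,
 * raise the appropriate exception (this function then does not return).
 */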
const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key,
                                        uint32_t syndrome, uint32_t isread)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);
    CPAccessResult res = CP_ACCESS_OK;
    int target_el;

    assert(ri != NULL);

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        res = CP_ACCESS_TRAP;
        goto fail;
    }

    if (ri->accessfn) {
        res = ri->accessfn(env, ri, isread);
    }

    /*
     * If the access function indicates a trap from EL0 to EL1 then
     * that always takes priority over the HSTR_EL2 trap. (If it indicates
     * a trap to EL3, then the HSTR_EL2 trap takes priority; if it indicates
     * a trap to EL2, then the syndrome is the same either way so we don't
     * care whether technically the architecture says that HSTR_EL2 trap or
     * the other trap takes priority. So we take the "check HSTR_EL2" path
     * for all of those cases.)
     */
    if (res != CP_ACCESS_OK && ((res & CP_ACCESS_EL_MASK) == 0) &&
        arm_current_el(env) == 0) {
        goto fail;
    }

    /*
     * HSTR_EL2 traps from EL1 are checked earlier, in generated code;
     * we only need to check here for traps from EL0.
     */
    if (!is_a64(env) && arm_current_el(env) == 0 && ri->cp == 15 &&
        arm_is_el2_enabled(env) &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    /*
     * Fine-grained traps also are lower priority than undef-to-EL1,
     * higher priority than trap-to-EL3, and we don't care about priority
     * order with other EL2 traps because the syndrome value is the same.
     */
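    /*
     * ri->fgt encodes which fine-grained trap bit governs this register:
     * IDX selects the trap word (the stored HFGRTR_EL2, HFGWTR_EL2,
     * HFGITR_EL2, ... value), BITPOS is the bit within that word, and
     * REV marks bits with inverted sense, where a set bit means
     * "do not trap".
     */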
    if (arm_fgt_active(env, arm_current_el(env))) {
        uint64_t trapword = 0;
        unsigned int idx = FIELD_EX32(ri->fgt, FGT, IDX);
        unsigned int bitpos = FIELD_EX32(ri->fgt, FGT, BITPOS);
        bool rev = FIELD_EX32(ri->fgt, FGT, REV);
        bool trapbit;

        if (ri->fgt & FGT_EXEC) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_exec));
            trapword = env->cp15.fgt_exec[idx];
        } else if (isread && (ri->fgt & FGT_R)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_read));
            trapword = env->cp15.fgt_read[idx];
        } else if (!isread && (ri->fgt & FGT_W)) {
            assert(idx < ARRAY_SIZE(env->cp15.fgt_write));
            trapword = env->cp15.fgt_write[idx];
        }

        trapbit = extract64(trapword, bitpos, 1);
        if (trapbit != rev) {
            res = CP_ACCESS_TRAP_EL2;
            goto fail;
        }
    }

    if (likely(res == CP_ACCESS_OK)) {
        return ri;
    }

 fail:
    switch (res & ~CP_ACCESS_EL_MASK) {
    case CP_ACCESS_TRAP:
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* Only CP_ACCESS_TRAP traps are direct to a specified EL */
        assert((res & CP_ACCESS_EL_MASK) == 0);
        if (cpu_isar_feature(aa64_ids, cpu) && isread &&
            arm_cpreg_in_idspace(ri)) {
            /*
             * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
             * not EC_UNCATEGORIZED
             */
            break;
        }
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    target_el = res & CP_ACCESS_EL_MASK;
    switch (target_el) {
    case 0:
        target_el = exception_target_el(env);
        break;
    case 2:
        assert(arm_current_el(env) != 3);
        assert(arm_is_el2_enabled(env));
        break;
    case 3:
        assert(arm_feature(env, ARM_FEATURE_EL3));
        break;
    default:
        /* No "direct" traps to EL1 */
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

const void *HELPER(lookup_cp_reg)(CPUARMState *env, uint32_t key)
{
    ARMCPU *cpu = env_archcpu(env);
    const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, key);

    assert(ri != NULL);
    return ri;
}

/*
 * Test for HCR_EL2.TIDCP at EL1.
 * Since implementation defined registers are rare, and within QEMU
 * most of them are no-op, do not waste HFLAGS space for this and
 * always use a helper.
 */
void HELPER(tidcp_el1)(CPUARMState *env, uint32_t syndrome)
{
    if (arm_hcr_el2_eff(env) & HCR_TIDCP) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, 2, GETPC());
    }
}

/*
 * Similarly, for FEAT_TIDCP1 at EL0.
 * We have already checked for the presence of the feature.
 */
void HELPER(tidcp_el0)(CPUARMState *env, uint32_t syndrome)
{
    /* See arm_sctlr(), but we also need the sctlr el. */
    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0);
    int target_el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1;

    /*
     * The bit is not valid unless the target el is aa64, but since the
     * bit test is simpler perform that first and check validity after.
     */
    if ((env->cp15.sctlr_el[target_el] & SCTLR_TIDCP)
        && arm_el_is_aa64(env, target_el)) {
        raise_exception_ra(env, EXCP_UDEF, syndrome, target_el, GETPC());
    }
}

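/*
 * Read or write a system register from translated code via its
 * ARMCPRegInfo. Registers marked ARM_CP_IO may touch device, timer or
 * other global state, so their readfn/writefn must run with the
 * iothread (BQL) lock held; other registers are accessed without
 * taking the lock.
 */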
void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

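/*
 * Register-controlled shifts take their shift amount from the bottom
 * byte of a register. An amount of 0 leaves both the value and the
 * carry flag unchanged. LSL/LSR by exactly 32 give a zero result with
 * the carry taken from bit 0 / bit 31 respectively, and larger amounts
 * give a zero result and zero carry; ASR by 32 or more fills the result
 * with the sign bit, and ROR effectively uses the amount modulo 32.
 */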
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
                          uint32_t access_type, uint32_t mmu_idx,
                          uint32_t size)
{
    uint32_t in_page = -((uint32_t)ptr | TARGET_PAGE_SIZE);
    uintptr_t ra = GETPC();

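    /*
     * If the access would cross a page boundary, probe each page
     * separately so that every underlying probe_access() call stays
     * within a single page.
     */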
    if (likely(size <= in_page)) {
        probe_access(env, ptr, size, access_type, mmu_idx, ra);
    } else {
        probe_access(env, ptr, in_page, access_type, mmu_idx, ra);
        probe_access(env, ptr + in_page, size - in_page,
                     access_type, mmu_idx, ra);
    }
}

/*
 * This function corresponds to AArch64.vESBOperation().
 * Note that the AArch32 version is not functionally different.
 */
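/*
 * ESB acts as an error synchronization barrier: if a virtual SError is
 * pending but currently masked by PSTATE.A, consume it here and record
 * it in VDISR_EL2 instead of leaving it to be taken as an exception
 * later.
 */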
void HELPER(vesb)(CPUARMState *env)
{
    /*
     * The EL2Enabled() check is done inside arm_hcr_el2_eff(); when EL2
     * is not enabled it returns 0, so HCR_EL2.VSE reads as zero and
     * nothing happens.
     */
    uint64_t hcr = arm_hcr_el2_eff(env);
    bool enabled = !(hcr & HCR_TGE) && (hcr & HCR_AMO);
    bool pending = enabled && (hcr & HCR_VSE);
    bool masked  = (env->daif & PSTATE_A);

    /* If VSE pending and masked, defer the exception.  */
    if (pending && masked) {
        uint32_t syndrome;

        if (arm_el_is_aa64(env, 1)) {
            /* Copy across IDS and ISS from VSESR. */
            syndrome = env->cp15.vsesr_el2 & 0x1ffffff;
        } else {
            ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal };

            if (extended_addresses_enabled(env)) {
                syndrome = arm_fi_to_lfsc(&fi);
            } else {
                syndrome = arm_fi_to_sfsc(&fi);
            }
            /* Copy across AET and ExT from VSESR. */
            syndrome |= env->cp15.vsesr_el2 & 0xd000;
        }

        /* Set VDISR_EL2.A along with the syndrome. */
        env->cp15.vdisr_el2 = syndrome | (1u << 31);

        /* Clear pending virtual SError */
        env->cp15.hcr_el2 &= ~HCR_VSE;
        cpu_reset_interrupt(env_cpu(env), CPU_INTERRUPT_VSERR);
    }
}