/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
#include "accel/tcg/cpu-ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif
#include "qemu/plugin.h"

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}
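
/*
 * The 'reg' values used by the two xPSR helpers above are the MRS/MSR
 * SYSm encodings: 0..3 are APSR/IAPSR/EAPSR/XPSR and 5..7 are
 * IPSR/EPSR/IEPSR. Bit 0 of the encoding selects the IPSR fields and
 * bit 2 excludes the APSR fields, which is why the helpers test those
 * individual bits rather than the full register number.
 */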

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
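
/*
 * The three flag bits combine into the eight M-profile MMU indexes:
 * for example, privileged Secure execution with no negative-priority
 * handler active yields ARMMMUIdx_MSPriv, while unprivileged NonSecure
 * code uses ARMMMUIdx_MUser.
 */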

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;
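
/*
 * STACK_NORMAL is used for ordinary exception-entry stacking,
 * STACK_IGNFAULTS for the callee-saves push when a derived exception
 * has already been generated (faults still update the FSRs but are not
 * pended), and STACK_LAZYFP for lazy FP state preservation, which
 * pends its faults via armv7m_nvic_set_pending_lazyfp().
 */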

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, 0, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                         value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the BQL as we are going to touch the NVIC */
    bql_lock();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    bql_unlock();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
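
/*
 * Layout of the lazy-FP save area addressed by FPCAR, as written above:
 * s0..s15 at FPCAR + 0x00..0x3c, FPSCR at +0x40, VPR (if MVE) at +0x44,
 * and, when FPCCR.TS is set, s16..s31 at +0x48..0x84.
 */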

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
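
/*
 * Of the four stack pointers, regs[13] is always the currently selected
 * one, other_sp is the non-selected (MSP vs PSP) pointer of the current
 * security state, and other_ss_msp/other_ss_psp belong to the opposite
 * security state; the swap above preserves that invariant across the
 * change of env->v7m.secure.
 */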

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
                                 arm_to_core_mmu_idx(mmu_idx));
    cpu_stl_mmu(env, sp, nextinst, oi, GETPC());
    cpu_stl_mmu(env, sp + 4, saved_psr, oi, GETPC());

    env->regs[13] = sp;
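    /*
     * LR is set to the FNC_RETURN magic value; a later branch to it from
     * the NonSecure callee is recognized as a secure function return and
     * pops the two words stacked above.
     */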
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
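
/*
 * So the signature is 0xfefa125a when the frame includes FP state
 * (FType 0 with an FPU present) and 0xfefa125b otherwise; the exception
 * return unstacking checks for the matching value.
 */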

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
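
/*
 * Resulting callee-saves frame layout (0x28 bytes): the integrity
 * signature at offset 0x0, a reserved word at 0x4, then r4..r11 at
 * offsets 0x8..0x24.
 */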

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}
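
/*
 * In summary: S, HFRDY, BFRDY, MONRDY and SFRDY are unbanked and live
 * only in fpccr[M_REG_S]; LSPACT, USER, THREAD, MMRDY and SPLIMVIOL are
 * banked per security state; and UFRDY is the one case where the NS
 * banked copy is always updated, with the S copy updated separately
 * when the security extension is present.
 */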

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
                                 arm_to_core_mmu_idx(mmu_idx));

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_mmu() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_mmu(env, faddr, slo, oi, ra);
            cpu_stl_mmu(env, faddr + 4, shi, oi, ra);
        }
        cpu_stl_mmu(env, fptr + 0x40, vfp_get_fpscr(env), oi, ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_mmu(env, fptr + 0x44, env->v7m.vpr, oi, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}
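
/*
 * The VLSTM store layout above matches the lazy-FP save area: s0..s15
 * at fptr + 0x00..0x3c, FPSCR at +0x40, VPR at +0x44, and s16..s31
 * from +0x48 when FPCCR.TS is set; VLLDM below reads the frame back in
 * the same layout.
 */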

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
                                 arm_to_core_mmu_idx(mmu_idx));

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_mmu(env, faddr, oi, ra);
            shi = cpu_ldl_mmu(env, faddr + 4, oi, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_mmu(env, fptr + 0x40, oi, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_mmu(env, fptr + 0x44, oi, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }
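
    /*
     * The three sizes are: 0x20 for the basic integer frame (r0-r3,
     * r12, lr, pc, xPSR), 0x68 when s0-s15 plus the FPSCR and VPR (or
     * reserved) words are added, and 0xa8 when s16-s31 are stacked too.
     */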

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
1390      * doesn't matter.
1391      */
1392     if (!arm_v7m_is_handler_mode(env)) {
1393         return;
1394     }
1395 
1396     /*
1397      * In the spec pseudocode ExceptionReturn() is called directly
1398      * from BXWritePC() and gets the full target PC value including
1399      * bit zero. In QEMU's implementation we treat it as a normal
1400      * jump-to-register (which is then caught later on), and so split
1401      * the target value up between env->regs[15] and env->thumb in
1402      * gen_bx(). Reconstitute it.
1403      */
1404     excret = env->regs[15];
1405     if (env->thumb) {
1406         excret |= 1;
1407     }
1408 
1409     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1410                   " previous exception %d\n",
1411                   excret, env->v7m.exception);
1412 
1413     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1414         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1415                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1416                       excret);
1417     }
1418 
1419     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1420 
1421     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1422         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1423                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1424                       "if FPU not present\n",
1425                       excret);
1426         ftype = true;
1427     }
1428 
1429     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1430         /*
1431          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1432          * we pick which FAULTMASK to clear.
1433          */
1434         if (!env->v7m.secure &&
1435             ((excret & R_V7M_EXCRET_ES_MASK) ||
1436              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
            sfault = true;
1438             /* For all other purposes, treat ES as 0 (R_HXSR) */
1439             excret &= ~R_V7M_EXCRET_ES_MASK;
1440         }
1441         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1442     }
1443 
1444     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1445         /*
1446          * Auto-clear FAULTMASK on return from other than NMI.
1447          * If the security extension is implemented then this only
1448          * happens if the raw execution priority is >= 0; the
1449          * value of the ES bit in the exception return value indicates
1450          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1451          */
1452         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1453             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1454                 env->v7m.faultmask[exc_secure] = 0;
1455             }
1456         } else {
1457             env->v7m.faultmask[M_REG_NS] = 0;
1458         }
1459     }
1460 
1461     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1462                                      exc_secure)) {
1463     case -1:
1464         /* attempt to exit an exception that isn't active */
1465         ufault = true;
1466         break;
1467     case 0:
1468         /* still an irq active now */
1469         break;
1470     case 1:
1471         /*
1472          * We returned to base exception level, no nesting.
1473          * (In the pseudocode this is written using "NestedActivation != 1"
1474          * where we have 'rettobase == false'.)
1475          */
1476         rettobase = true;
1477         break;
1478     default:
1479         g_assert_not_reached();
1480     }
1481 
1482     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1483     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1484     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1485         (excret & R_V7M_EXCRET_S_MASK);
1486 
1487     if (arm_feature(env, ARM_FEATURE_V8)) {
1488         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1489             /*
1490              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1491              * we choose to take the UsageFault.
1492              */
1493             if ((excret & R_V7M_EXCRET_S_MASK) ||
1494                 (excret & R_V7M_EXCRET_ES_MASK) ||
1495                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1496                 ufault = true;
1497             }
1498         }
1499         if (excret & R_V7M_EXCRET_RES0_MASK) {
1500             ufault = true;
1501         }
1502     } else {
1503         /* For v7M we only recognize certain combinations of the low bits */
1504         switch (excret & 0xf) {
1505         case 1: /* Return to Handler */
1506             break;
1507         case 13: /* Return to Thread using Process stack */
1508         case 9: /* Return to Thread using Main stack */
1509             /*
1510              * We only need to check NONBASETHRDENA for v7M, because in
1511              * v8M this bit does not exist (it is RES1).
1512              */
1513             if (!rettobase &&
1514                 !(env->v7m.ccr[env->v7m.secure] &
1515                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1516                 ufault = true;
1517             }
1518             break;
1519         default:
1520             ufault = true;
1521         }
1522     }
1523 
1524     /*
1525      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1526      * Handler mode (and will be until we write the new XPSR.Interrupt
1527      * field) this does not switch around the current stack pointer.
1528      * We must do this before we do any kind of tailchaining, including
1529      * for the derived exceptions on integrity check failures, or we will
1530      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1531      */
1532     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1533 
1534     /*
     * Clear scratch FP values left in caller-saved registers; this
1536      * must happen before any kind of tail chaining.
1537      */
1538     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1539         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1540         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1541             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1542             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1543             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1544                           "stackframe: error during lazy state deactivation\n");
1545             v7m_exception_taken(cpu, excret, true, false);
1546             return;
1547         } else {
1548             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1549                 /* v8.1M adds this NOCP check */
1550                 bool nsacr_pass = exc_secure ||
1551                     extract32(env->v7m.nsacr, 10, 1);
1552                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1553                 if (!nsacr_pass) {
1554                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1555                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1556                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1557                         "stackframe: NSACR prevents clearing FPU registers\n");
1558                     v7m_exception_taken(cpu, excret, true, false);
1559                     return;
1560                 } else if (!cpacr_pass) {
1561                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1562                                             exc_secure);
1563                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1564                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1565                         "stackframe: CPACR prevents clearing FPU registers\n");
1566                     v7m_exception_taken(cpu, excret, true, false);
1567                     return;
1568                 }
1569             }
1570             /* Clear s0..s15, FPSCR and VPR */
1571             int i;
1572 
1573             for (i = 0; i < 16; i += 2) {
1574                 *aa32_vfp_dreg(env, i / 2) = 0;
1575             }
1576             vfp_set_fpscr(env, 0);
1577             if (cpu_isar_feature(aa32_mve, cpu)) {
1578                 env->v7m.vpr = 0;
1579             }
1580         }
1581     }
1582 
1583     if (sfault) {
1584         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1585         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1586         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1587                       "stackframe: failed EXC_RETURN.ES validity check\n");
1588         v7m_exception_taken(cpu, excret, true, false);
1589         return;
1590     }
1591 
1592     if (ufault) {
1593         /*
1594          * Bad exception return: instead of popping the exception
1595          * stack, directly take a usage fault on the current stack.
1596          */
1597         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1598         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1599         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1600                       "stackframe: failed exception return integrity check\n");
1601         v7m_exception_taken(cpu, excret, true, false);
1602         return;
1603     }
1604 
1605     /*
1606      * Tailchaining: if there is currently a pending exception that
1607      * is high enough priority to preempt execution at the level we're
1608      * about to return to, then just directly take that exception now,
     * avoiding an unstack-and-then-stack. Note that now that we have
     * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
     * our current execution priority is already the execution priority we are
1612      * returning to -- none of the state we would unstack or set based on
1613      * the EXCRET value affects it.
1614      */
1615     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1616         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1617         v7m_exception_taken(cpu, excret, true, false);
1618         return;
1619     }
1620 
1621     switch_v7m_security_state(env, return_to_secure);
1622 
1623     {
1624         /*
1625          * The stack pointer we should be reading the exception frame from
1626          * depends on bits in the magic exception return type value (and
1627          * for v8M isn't necessarily the stack pointer we will eventually
1628          * end up resuming execution with). Get a pointer to the location
1629          * in the CPU state struct where the SP we need is currently being
1630          * stored; we will use and modify it in place.
1631          * We use this limited C variable scope so we don't accidentally
1632          * use 'frame_sp_p' after we do something that makes it invalid.
1633          */
1634         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1635         uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
1636                                                   !return_to_handler, spsel);
1637         uint32_t frameptr = *frame_sp_p;
1638         bool pop_ok = true;
1639         ARMMMUIdx mmu_idx;
1640         bool return_to_priv = return_to_handler ||
1641             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1642 
1643         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1644                                                         return_to_priv);
1645 
1646         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1647             arm_feature(env, ARM_FEATURE_V8)) {
1648             qemu_log_mask(LOG_GUEST_ERROR,
1649                           "M profile exception return with non-8-aligned SP "
1650                           "for destination state is UNPREDICTABLE\n");
1651         }
1652 
1653         /* Do we need to pop callee-saved registers? */
1654         if (return_to_secure &&
1655             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1656              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1657             uint32_t actual_sig;
1658 
1659             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1660 
1661             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1662                 /* Take a SecureFault on the current stack */
1663                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1664                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1665                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1666                               "stackframe: failed exception return integrity "
1667                               "signature check\n");
1668                 v7m_exception_taken(cpu, excret, true, false);
1669                 return;
1670             }
1671 
1672             pop_ok = pop_ok &&
1673                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1674                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1675                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1676                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1677                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1678                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1679                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1680                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1681 
1682             frameptr += 0x28;
1683         }
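
        /*
         * (The callee-saved extension frame just consumed is ten words:
         * the integrity signature at +0x0, a reserved word at +0x4 and
         * r4-r11 at +0x8..+0x24, which is why frameptr advanced by 0x28.)
         */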
1684 
1685         /* Pop registers */
1686         pop_ok = pop_ok &&
1687             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1688             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1689             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1690             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1691             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1692             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1693             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1694             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1695 
1696         if (!pop_ok) {
1697             /*
1698              * v7m_stack_read() pended a fault, so take it (as a tail
1699              * chained exception on the same stack frame)
1700              */
1701             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1702             v7m_exception_taken(cpu, excret, true, false);
1703             return;
1704         }
1705 
1706         /*
1707          * Returning from an exception with a PC with bit 0 set is defined
1708          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1709          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1710          * the lsbit, and there are several RTOSes out there which incorrectly
1711          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1712          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1713          * complain about the badly behaved guest.
1714          */
1715         if (env->regs[15] & 1) {
1716             env->regs[15] &= ~1U;
1717             if (!arm_feature(env, ARM_FEATURE_V8)) {
1718                 qemu_log_mask(LOG_GUEST_ERROR,
1719                               "M profile return from interrupt with misaligned "
1720                               "PC is UNPREDICTABLE on v7M\n");
1721             }
1722         }
1723 
1724         if (arm_feature(env, ARM_FEATURE_V8)) {
1725             /*
1726              * For v8M we have to check whether the xPSR exception field
1727              * matches the EXCRET value for return to handler/thread
1728              * before we commit to changing the SP and xPSR.
1729              */
1730             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1731             if (return_to_handler != will_be_handler) {
1732                 /*
1733                  * Take an INVPC UsageFault on the current stack.
1734                  * By this point we will have switched to the security state
1735                  * for the background state, so this UsageFault will target
1736                  * that state.
1737                  */
1738                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1739                                         env->v7m.secure);
1740                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1741                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1742                               "stackframe: failed exception return integrity "
1743                               "check\n");
1744                 v7m_exception_taken(cpu, excret, true, false);
1745                 return;
1746             }
1747         }
1748 
1749         if (!ftype) {
1750             /* FP present and we need to handle it */
1751             if (!return_to_secure &&
1752                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1753                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1754                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1755                 qemu_log_mask(CPU_LOG_INT,
1756                               "...taking SecureFault on existing stackframe: "
1757                               "Secure LSPACT set but exception return is "
1758                               "not to secure state\n");
1759                 v7m_exception_taken(cpu, excret, true, false);
1760                 return;
1761             }
1762 
1763             restore_s16_s31 = return_to_secure &&
1764                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1765 
1766             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1767                 /* State in FPU is still valid, just clear LSPACT */
1768                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1769             } else {
1770                 int i;
1771                 uint32_t fpscr;
1772                 bool cpacr_pass, nsacr_pass;
1773 
1774                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1775                                             return_to_priv);
1776                 nsacr_pass = return_to_secure ||
1777                     extract32(env->v7m.nsacr, 10, 1);
1778 
1779                 if (!cpacr_pass) {
1780                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1781                                             return_to_secure);
1782                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1783                     qemu_log_mask(CPU_LOG_INT,
1784                                   "...taking UsageFault on existing "
1785                                   "stackframe: CPACR.CP10 prevents unstacking "
1786                                   "FP regs\n");
1787                     v7m_exception_taken(cpu, excret, true, false);
1788                     return;
1789                 } else if (!nsacr_pass) {
1790                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1791                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1792                     qemu_log_mask(CPU_LOG_INT,
1793                                   "...taking Secure UsageFault on existing "
1794                                   "stackframe: NSACR.CP10 prevents unstacking "
1795                                   "FP regs\n");
1796                     v7m_exception_taken(cpu, excret, true, false);
1797                     return;
1798                 }
1799 
1800                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1801                     uint32_t slo, shi;
1802                     uint64_t dn;
1803                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1804 
1805                     if (i >= 16) {
1806                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1807                     }
1808 
1809                     pop_ok = pop_ok &&
1810                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1811                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1812 
1813                     if (!pop_ok) {
1814                         break;
1815                     }
1816 
1817                     dn = (uint64_t)shi << 32 | slo;
1818                     *aa32_vfp_dreg(env, i / 2) = dn;
1819                 }
1820                 pop_ok = pop_ok &&
1821                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1822                 if (pop_ok) {
1823                     vfp_set_fpscr(env, fpscr);
1824                 }
1825                 if (cpu_isar_feature(aa32_mve, cpu)) {
1826                     pop_ok = pop_ok &&
1827                         v7m_stack_read(cpu, &env->v7m.vpr,
1828                                        frameptr + 0x64, mmu_idx);
1829                 }
1830                 if (!pop_ok) {
1831                     /*
                     * These regs are 0 if the security extension is present;
                     * otherwise they are merely UNKNOWN. We always zero them.
1834                      */
1835                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1836                         *aa32_vfp_dreg(env, i / 2) = 0;
1837                     }
1838                     vfp_set_fpscr(env, 0);
1839                     if (cpu_isar_feature(aa32_mve, cpu)) {
1840                         env->v7m.vpr = 0;
1841                     }
1842                 }
1843             }
1844         }
1845         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1846                                                V7M_CONTROL, FPCA, !ftype);
1847 
1848         /* Commit to consuming the stack frame */
1849         frameptr += 0x20;
1850         if (!ftype) {
1851             frameptr += 0x48;
1852             if (restore_s16_s31) {
1853                 frameptr += 0x40;
1854             }
1855         }
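
        /*
         * Worked totals from the arithmetic above: a basic frame is
         * 0x20 bytes; an FP frame holding s0-s15, FPSCR and VPR is
         * 0x20 + 0x48 = 0x68; and one that also holds s16-s31 is
         * 0x68 + 0x40 = 0xa8 (in addition to any 0x28-byte callee-saved
         * extension consumed earlier).
         */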
1856         /*
         * Undo stack alignment: the SPREALIGN bit indicates that the
         * original pre-exception SP was not 8-aligned and we added a
         * padding word to align it, so we undo this by ORing in the bit
         * that raises the current 8-aligned value back to the 8-unaligned
         * value. (Adding 4 would work too, but a logical OR is how the
         * pseudocode specifies it.)
1862          */
1863         if (xpsr & XPSR_SPREALIGN) {
1864             frameptr |= 4;
1865         }
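
        /*
         * Example: if the entry stacking aligned an original SP of
         * 0x20001004 down to 0x20001000 and set XPSR_SPREALIGN, then
         * the unstacked 8-aligned value 0x20001000 is turned back into
         * 0x20001004 by the OR just above.
         */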
1866         *frame_sp_p = frameptr;
1867     }
1868 
1869     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1870     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1871         xpsr_mask &= ~XPSR_GE;
1872     }
1873     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1874     xpsr_write(env, xpsr, xpsr_mask);
1875 
1876     if (env->v7m.secure) {
1877         bool sfpa = xpsr & XPSR_SFPA;
1878 
1879         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1880                                                V7M_CONTROL, SFPA, sfpa);
1881     }
1882 
1883     /*
1884      * The restored xPSR exception field will be zero if we're
1885      * resuming in Thread mode. If that doesn't match what the
1886      * exception return excret specified then this is a UsageFault.
1887      * v7M requires we make this check here; v8M did it earlier.
1888      */
1889     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1890         /*
1891          * Take an INVPC UsageFault by pushing the stack again;
1892          * we know we're v7M so this is never a Secure UsageFault.
1893          */
1894         bool ignore_stackfaults;
1895 
1896         assert(!arm_feature(env, ARM_FEATURE_V8));
1897         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1898         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1899         ignore_stackfaults = v7m_push_stack(cpu);
1900         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1901                       "failed exception return integrity check\n");
1902         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1903         return;
1904     }
1905 
1906     /* Otherwise, we have a successful exception exit. */
1907     arm_clear_exclusive(env);
1908     arm_rebuild_hflags(env);
1909     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1910 }
1911 
1912 static bool do_v7m_function_return(ARMCPU *cpu)
1913 {
1914     /*
1915      * v8M security extensions magic function return.
1916      * We may either:
1917      *  (1) throw an exception (longjump)
1918      *  (2) return true if we successfully handled the function return
1919      *  (3) return false if we failed a consistency check and have
1920      *      pended a UsageFault that needs to be taken now
1921      *
1922      * At this point the magic return value is split between env->regs[15]
1923      * and env->thumb. We don't bother to reconstitute it because we don't
1924      * need it (all values are handled the same way).
1925      */
1926     CPUARMState *env = &cpu->env;
1927     uint32_t newpc, newpsr, newpsr_exc;
1928 
1929     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1930 
1931     {
1932         bool threadmode, spsel;
1933         MemOpIdx oi;
1934         ARMMMUIdx mmu_idx;
1935         uint32_t *frame_sp_p;
1936         uint32_t frameptr;
1937 
1938         /* Pull the return address and IPSR from the Secure stack */
1939         threadmode = !arm_v7m_is_handler_mode(env);
1940         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1941 
1942         frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
1943         frameptr = *frame_sp_p;
1944 
1945         /*
1946          * These loads may throw an exception (for MPU faults). We want to
1947          * do them as secure, so work out what MMU index that is.
1948          */
1949         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1950         oi = make_memop_idx(MO_LEUL | MO_ALIGN, arm_to_core_mmu_idx(mmu_idx));
1951         newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
1952         newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
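
        /*
         * These two words are the function-return frame pushed on the
         * Secure stack by the BLXNS that made the secure-to-non-secure
         * call: the return address at +0x0 and the partial xPSR at
         * +0x4. The stack pointer adjustment below consumes exactly
         * these 8 bytes.
         */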
1953 
1954         /* Consistency checks on new IPSR */
1955         newpsr_exc = newpsr & XPSR_EXCP;
1956         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1957               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1958             /* Pend the fault and tell our caller to take it */
1959             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1960             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1961                                     env->v7m.secure);
1962             qemu_log_mask(CPU_LOG_INT,
1963                           "...taking INVPC UsageFault: "
1964                           "IPSR consistency check failed\n");
1965             return false;
1966         }
1967 
1968         *frame_sp_p = frameptr + 8;
1969     }
1970 
1971     /* This invalidates frame_sp_p */
1972     switch_v7m_security_state(env, true);
1973     env->v7m.exception = newpsr_exc;
1974     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1975     if (newpsr & XPSR_SFPA) {
1976         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1977     }
1978     xpsr_write(env, 0, XPSR_IT);
1979     env->thumb = newpc & 1;
1980     env->regs[15] = newpc & ~1;
1981     arm_rebuild_hflags(env);
1982 
1983     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1984     return true;
1985 }
1986 
1987 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
1988                                uint32_t addr, uint16_t *insn)
1989 {
1990     /*
1991      * Load a 16-bit portion of a v7M instruction, returning true on success,
1992      * or false on failure (in which case we will have pended the appropriate
1993      * exception).
1994      * We need to do the instruction fetch's MPU and SAU checks
1995      * like this because there is no MMU index that would allow
1996      * doing the load with a single function call. Instead we must
1997      * first check that the security attributes permit the load
1998      * and that they don't mismatch on the two halves of the instruction,
1999      * and then we do the load as a secure load (ie using the security
2000      * attributes of the address, not the CPU, as architecturally required).
2001      */
2002     CPUState *cs = CPU(cpu);
2003     CPUARMState *env = &cpu->env;
2004     V8M_SAttributes sattrs = {};
2005     GetPhysAddrResult res = {};
2006     ARMMMUFaultInfo fi = {};
2007     MemTxResult txres;
2008 
2009     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
2010     if (!sattrs.nsc || sattrs.ns) {
2011         /*
2012          * This must be the second half of the insn, and it straddles a
2013          * region boundary with the second half not being S&NSC.
2014          */
2015         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2016         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2017         qemu_log_mask(CPU_LOG_INT,
2018                       "...really SecureFault with SFSR.INVEP\n");
2019         return false;
2020     }
2021     if (get_phys_addr(env, addr, MMU_INST_FETCH, 0, mmu_idx, &res, &fi)) {
2022         /* the MPU lookup failed */
2023         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2024         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2025         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2026         return false;
2027     }
2028     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2029                                   res.f.phys_addr, res.f.attrs, &txres);
2030     if (txres != MEMTX_OK) {
2031         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2032         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2033         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2034         return false;
2035     }
2036     return true;
2037 }
2038 
2039 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2040                                    uint32_t addr, uint32_t *spdata)
2041 {
2042     /*
2043      * Read a word of data from the stack for the SG instruction,
2044      * writing the value into *spdata. If the load succeeds, return
2045      * true; otherwise pend an appropriate exception and return false.
2046      * (We can't use data load helpers here that throw an exception
2047      * because of the context we're called in, which is halfway through
2048      * arm_v7m_cpu_do_interrupt().)
2049      */
2050     CPUState *cs = CPU(cpu);
2051     CPUARMState *env = &cpu->env;
2052     MemTxResult txres;
2053     GetPhysAddrResult res = {};
2054     ARMMMUFaultInfo fi = {};
2055     uint32_t value;
2056 
2057     if (get_phys_addr(env, addr, MMU_DATA_LOAD, 0, mmu_idx, &res, &fi)) {
2058         /* MPU/SAU lookup failed */
2059         if (fi.type == ARMFault_QEMU_SFault) {
2060             qemu_log_mask(CPU_LOG_INT,
2061                           "...SecureFault during stack word read\n");
2062             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2063             env->v7m.sfar = addr;
2064             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2065         } else {
2066             qemu_log_mask(CPU_LOG_INT,
2067                           "...MemManageFault during stack word read\n");
2068             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2069                 R_V7M_CFSR_MMARVALID_MASK;
2070             env->v7m.mmfar[M_REG_S] = addr;
2071             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2072         }
2073         return false;
2074     }
2075     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2076                               res.f.phys_addr, res.f.attrs, &txres);
2077     if (txres != MEMTX_OK) {
2078         /* BusFault trying to read the data */
2079         qemu_log_mask(CPU_LOG_INT,
2080                       "...BusFault during stack word read\n");
2081         env->v7m.cfsr[M_REG_NS] |=
2082             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2083         env->v7m.bfar = addr;
2084         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2085         return false;
2086     }
2087 
2088     *spdata = value;
2089     return true;
2090 }
2091 
2092 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2093 {
2094     /*
2095      * Check whether this attempt to execute code in a Secure & NS-Callable
2096      * memory region is for an SG instruction; if so, then emulate the
2097      * effect of the SG instruction and return true. Otherwise pend
2098      * the correct kind of exception and return false.
2099      */
2100     CPUARMState *env = &cpu->env;
2101     ARMMMUIdx mmu_idx;
2102     uint16_t insn;
2103 
2104     /*
2105      * We should never get here unless get_phys_addr_pmsav8() caused
2106      * an exception for NS executing in S&NSC memory.
2107      */
2108     assert(!env->v7m.secure);
2109     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2110 
2111     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2112     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2113 
2114     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2115         return false;
2116     }
2117 
2118     if (!env->thumb) {
2119         goto gen_invep;
2120     }
2121 
2122     if (insn != 0xe97f) {
2123         /*
2124          * Not an SG instruction first half (we choose the IMPDEF
2125          * early-SG-check option).
2126          */
2127         goto gen_invep;
2128     }
2129 
2130     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2131         return false;
2132     }
2133 
2134     if (insn != 0xe97f) {
2135         /*
2136          * Not an SG instruction second half (yes, both halves of the SG
2137          * insn have the same hex value)
2138          */
2139         goto gen_invep;
2140     }
2141 
2142     /*
2143      * OK, we have confirmed that we really have an SG instruction.
2144      * We know we're NS in S memory so don't need to repeat those checks.
2145      */
2146     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2147                   ", executing it\n", env->regs[15]);
2148 
2149     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2150         !arm_v7m_is_handler_mode(env)) {
2151         /*
2152          * v8.1M exception stack frame integrity check. Note that we
2153          * must perform the memory access even if CCR_S.TRD is zero
2154          * and we aren't going to check what the data loaded is.
2155          */
2156         uint32_t spdata, sp;
2157 
2158         /*
2159          * We know we are currently NS, so the S stack pointers must be
2160          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2161          */
2162         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2163         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2164             /* Stack access failed and an exception has been pended */
2165             return false;
2166         }
2167 
2168         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2169             if (((spdata & ~1) == 0xfefa125a) ||
2170                 !(env->v7m.control[M_REG_S] & 1)) {
2171                 goto gen_invep;
2172             }
2173         }
2174     }
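
    /*
     * (0xfefa125a is, apart from bit 0, the same integrity-signature
     * value checked on exception return (cf. v7m_integrity_sig()); the
     * TRD check above therefore faults an SG whose Secure stack top
     * holds the signature, or one arriving with CONTROL_S.nPRIV == 0.)
     */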
2175 
2176     env->regs[14] &= ~1;
2177     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2178     switch_v7m_security_state(env, true);
2179     xpsr_write(env, 0, XPSR_IT);
2180     env->regs[15] += 4;
2181     arm_rebuild_hflags(env);
2182     return true;
2183 
2184 gen_invep:
2185     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2186     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2187     qemu_log_mask(CPU_LOG_INT,
2188                   "...really SecureFault with SFSR.INVEP\n");
2189     return false;
2190 }
2191 
2192 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2193 {
2194     ARMCPU *cpu = ARM_CPU(cs);
2195     CPUARMState *env = &cpu->env;
2196     uint32_t lr;
2197     bool ignore_stackfaults;
2198     uint64_t last_pc = env->regs[15];
2199 
2200     arm_log_exception(cs);
2201 
2202     /*
2203      * For exceptions we just mark as pending on the NVIC, and let that
2204      * handle it.
2205      */
2206     switch (cs->exception_index) {
2207     case EXCP_UDEF:
2208         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2209         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2210         break;
2211     case EXCP_NOCP:
2212     {
2213         /*
2214          * NOCP might be directed to something other than the current
2215          * security state if this fault is because of NSACR; we indicate
2216          * the target security state using exception.target_el.
2217          */
2218         int target_secstate;
2219 
2220         if (env->exception.target_el == 3) {
2221             target_secstate = M_REG_S;
2222         } else {
2223             target_secstate = env->v7m.secure;
2224         }
2225         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2226         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2227         break;
2228     }
2229     case EXCP_INVSTATE:
2230         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2231         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2232         break;
2233     case EXCP_STKOF:
2234         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2235         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2236         break;
2237     case EXCP_LSERR:
2238         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2239         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2240         break;
2241     case EXCP_UNALIGNED:
2242         /* Unaligned faults reported by M-profile aware code */
2243         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2244         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2245         break;
2246     case EXCP_DIVBYZERO:
2247         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2248         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2249         break;
2250     case EXCP_SWI:
2251         /* The PC already points to the next instruction.  */
2252         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2253         break;
2254     case EXCP_PREFETCH_ABORT:
2255     case EXCP_DATA_ABORT:
2256         /*
2257          * Note that for M profile we don't have a guest facing FSR, but
2258          * the env->exception.fsr will be populated by the code that
2259          * raises the fault, in the A profile short-descriptor format.
2260          *
2261          * Log the exception.vaddress now regardless of subtype, because
2262          * logging below only logs it when it goes into a guest visible
2263          * register.
2264          */
2265         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2266                       (uint32_t)env->exception.vaddress);
2267         switch (env->exception.fsr & 0xf) {
2268         case M_FAKE_FSR_NSC_EXEC:
2269             /*
2270              * Exception generated when we try to execute code at an address
2271              * which is marked as Secure & Non-Secure Callable and the CPU
2272              * is in the Non-Secure state. The only instruction which can
2273              * be executed like this is SG (and that only if both halves of
2274              * the SG instruction have the same security attributes.)
2275              * Everything else must generate an INVEP SecureFault, so we
2276              * emulate the SG instruction here.
2277              */
2278             if (v7m_handle_execute_nsc(cpu)) {
2279                 return;
2280             }
2281             break;
2282         case M_FAKE_FSR_SFAULT:
2283             /*
2284              * Various flavours of SecureFault for attempts to execute or
2285              * access data in the wrong security state.
2286              */
2287             switch (cs->exception_index) {
2288             case EXCP_PREFETCH_ABORT:
2289                 if (env->v7m.secure) {
2290                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2291                     qemu_log_mask(CPU_LOG_INT,
2292                                   "...really SecureFault with SFSR.INVTRAN\n");
2293                 } else {
2294                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2295                     qemu_log_mask(CPU_LOG_INT,
2296                                   "...really SecureFault with SFSR.INVEP\n");
2297                 }
2298                 break;
2299             case EXCP_DATA_ABORT:
2300                 /* This must be an NS access to S memory */
2301                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2302                 qemu_log_mask(CPU_LOG_INT,
2303                               "...really SecureFault with SFSR.AUVIOL\n");
2304                 break;
2305             }
2306             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2307             break;
2308         case 0x8: /* External Abort */
2309             switch (cs->exception_index) {
2310             case EXCP_PREFETCH_ABORT:
2311                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2312                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2313                 break;
2314             case EXCP_DATA_ABORT:
2315                 env->v7m.cfsr[M_REG_NS] |=
2316                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2317                 env->v7m.bfar = env->exception.vaddress;
2318                 qemu_log_mask(CPU_LOG_INT,
2319                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2320                               env->v7m.bfar);
2321                 break;
2322             }
2323             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2324             break;
2325         case 0x1: /* Alignment fault reported by generic code */
2326             qemu_log_mask(CPU_LOG_INT,
2327                           "...really UsageFault with UFSR.UNALIGNED\n");
2328             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2329             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2330                                     env->v7m.secure);
2331             break;
2332         default:
2333             /*
2334              * All other FSR values are either MPU faults or "can't happen
2335              * for M profile" cases.
2336              */
2337             switch (cs->exception_index) {
2338             case EXCP_PREFETCH_ABORT:
2339                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2340                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2341                 break;
2342             case EXCP_DATA_ABORT:
2343                 env->v7m.cfsr[env->v7m.secure] |=
2344                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2345                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2346                 qemu_log_mask(CPU_LOG_INT,
2347                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2348                               env->v7m.mmfar[env->v7m.secure]);
2349                 break;
2350             }
2351             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2352                                     env->v7m.secure);
2353             break;
2354         }
2355         break;
2356     case EXCP_SEMIHOST:
2357         qemu_log_mask(CPU_LOG_INT,
2358                       "...handling as semihosting call 0x%x\n",
2359                       env->regs[0]);
2360 #ifdef CONFIG_TCG
2361         do_common_semihosting(cs);
2362 #else
2363         g_assert_not_reached();
2364 #endif
2365         env->regs[15] += env->thumb ? 2 : 4;
2366         qemu_plugin_vcpu_hostcall_cb(cs, last_pc);
2367         return;
2368     case EXCP_BKPT:
2369         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2370         break;
2371     case EXCP_IRQ:
2372         break;
2373     case EXCP_EXCEPTION_EXIT:
2374         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2375             /* Must be v8M security extension function return */
2376             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2377             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2378             if (do_v7m_function_return(cpu)) {
2379                 return;
2380             }
2381         } else {
2382             do_v7m_exception_exit(cpu);
2383             return;
2384         }
2385         break;
2386     case EXCP_LAZYFP:
2387         /*
2388          * We already pended the specific exception in the NVIC in the
2389          * v7m_preserve_fp_state() helper function.
2390          */
2391         break;
2392     default:
2393         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2394         return; /* Never happens.  Keep compiler happy.  */
2395     }
2396 
2397     if (arm_feature(env, ARM_FEATURE_V8)) {
2398         lr = R_V7M_EXCRET_RES1_MASK |
2399             R_V7M_EXCRET_DCRS_MASK;
2400         /*
2401          * The S bit indicates whether we should return to Secure
2402          * or NonSecure (ie our current state).
2403          * The ES bit indicates whether we're taking this exception
2404          * to Secure or NonSecure (ie our target state). We set it
2405          * later, in v7m_exception_taken().
2406          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2407          * This corresponds to the ARM ARM pseudocode for v8M setting
2408          * some LR bits in PushStack() and some in ExceptionTaken();
2409          * the distinction matters for the tailchain cases where we
2410          * can take an exception without pushing the stack.
2411          */
2412         if (env->v7m.secure) {
2413             lr |= R_V7M_EXCRET_S_MASK;
2414         }
2415     } else {
2416         lr = R_V7M_EXCRET_RES1_MASK |
2417             R_V7M_EXCRET_S_MASK |
2418             R_V7M_EXCRET_DCRS_MASK |
2419             R_V7M_EXCRET_ES_MASK;
2420         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2421             lr |= R_V7M_EXCRET_SPSEL_MASK;
2422         }
2423     }
2424     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2425         lr |= R_V7M_EXCRET_FTYPE_MASK;
2426     }
2427     if (!arm_v7m_is_handler_mode(env)) {
2428         lr |= R_V7M_EXCRET_MODE_MASK;
2429     }
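
    /*
     * Worked examples of the LR assembly above: a v7M core with no FP
     * context active gets the classic values 0xfffffff9 (exception
     * taken from Thread mode, main stack), 0xfffffffd (Thread mode,
     * process stack) or 0xfffffff1 (taken from Handler mode).
     */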
2430 
2431     ignore_stackfaults = v7m_push_stack(cpu);
2432     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2433 
2434     arm_do_plugin_vcpu_discon_cb(cs, last_pc);
2435 }
2436 
2437 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2438 {
2439     unsigned el = arm_current_el(env);
2440 
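    /*
     * 'reg' here is the SYSm field of the MRS encoding; SYSm values
     * with bit 7 set (0x88 and up, handled below) are the aliases
     * through which Secure code accesses the banked Non-secure
     * registers.
     */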
2441     /* First handle registers which unprivileged can read */
2442     switch (reg) {
2443     case 0 ... 7: /* xPSR sub-fields */
2444         return v7m_mrs_xpsr(env, reg, el);
2445     case 20: /* CONTROL */
2446         return arm_v7m_mrs_control(env, env->v7m.secure);
2447     case 0x94: /* CONTROL_NS */
2448         /*
2449          * We have to handle this here because unprivileged Secure code
2450          * can read the NS CONTROL register.
2451          */
2452         if (!env->v7m.secure) {
2453             return 0;
2454         }
2455         return env->v7m.control[M_REG_NS] |
2456             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2457     }
2458 
2459     if (el == 0) {
2460         return 0; /* unprivileged reads others as zero */
2461     }
2462 
2463     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2464         switch (reg) {
2465         case 0x88: /* MSP_NS */
2466             if (!env->v7m.secure) {
2467                 return 0;
2468             }
2469             return env->v7m.other_ss_msp;
2470         case 0x89: /* PSP_NS */
2471             if (!env->v7m.secure) {
2472                 return 0;
2473             }
2474             return env->v7m.other_ss_psp;
2475         case 0x8a: /* MSPLIM_NS */
2476             if (!env->v7m.secure) {
2477                 return 0;
2478             }
2479             return env->v7m.msplim[M_REG_NS];
2480         case 0x8b: /* PSPLIM_NS */
2481             if (!env->v7m.secure) {
2482                 return 0;
2483             }
2484             return env->v7m.psplim[M_REG_NS];
2485         case 0x90: /* PRIMASK_NS */
2486             if (!env->v7m.secure) {
2487                 return 0;
2488             }
2489             return env->v7m.primask[M_REG_NS];
2490         case 0x91: /* BASEPRI_NS */
2491             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2492                 goto bad_reg;
2493             }
2494             if (!env->v7m.secure) {
2495                 return 0;
2496             }
2497             return env->v7m.basepri[M_REG_NS];
2498         case 0x93: /* FAULTMASK_NS */
2499             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2500                 goto bad_reg;
2501             }
2502             if (!env->v7m.secure) {
2503                 return 0;
2504             }
2505             return env->v7m.faultmask[M_REG_NS];
2506         case 0x98: /* SP_NS */
2507         {
2508             /*
2509              * This gives the non-secure SP selected based on whether we're
2510              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2511              */
2512             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2513 
2514             if (!env->v7m.secure) {
2515                 return 0;
2516             }
2517             if (!arm_v7m_is_handler_mode(env) && spsel) {
2518                 return env->v7m.other_ss_psp;
2519             } else {
2520                 return env->v7m.other_ss_msp;
2521             }
2522         }
2523         default:
2524             break;
2525         }
2526     }
2527 
2528     switch (reg) {
2529     case 8: /* MSP */
2530         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2531     case 9: /* PSP */
2532         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2533     case 10: /* MSPLIM */
2534         if (!arm_feature(env, ARM_FEATURE_V8)) {
2535             goto bad_reg;
2536         }
2537         return env->v7m.msplim[env->v7m.secure];
2538     case 11: /* PSPLIM */
2539         if (!arm_feature(env, ARM_FEATURE_V8)) {
2540             goto bad_reg;
2541         }
2542         return env->v7m.psplim[env->v7m.secure];
2543     case 16: /* PRIMASK */
2544         return env->v7m.primask[env->v7m.secure];
2545     case 17: /* BASEPRI */
2546     case 18: /* BASEPRI_MAX */
2547         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2548             goto bad_reg;
2549         }
2550         return env->v7m.basepri[env->v7m.secure];
2551     case 19: /* FAULTMASK */
2552         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2553             goto bad_reg;
2554         }
2555         return env->v7m.faultmask[env->v7m.secure];
2556     default:
2557     bad_reg:
2558         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2559                                        " register %d\n", reg);
2560         return 0;
2561     }
2562 }
2563 
2564 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2565 {
2566     /*
2567      * We're passed bits [11..0] of the instruction; extract
2568      * SYSm and the mask bits.
2569      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2570      * we choose to treat them as if the mask bits were valid.
2571      * NB that the pseudocode 'mask' variable is bits [11..10],
2572      * whereas ours is [11..8].
2573      */
2574     uint32_t mask = extract32(maskreg, 8, 4);
2575     uint32_t reg = extract32(maskreg, 0, 8);
2576     int cur_el = arm_current_el(env);
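
    /*
     * Worked example of the packing described above: "MSR APSR_nzcvq,
     * Rn" (pseudocode mask<1> set) arrives with reg == 0 and mask == 8,
     * so v7m_msr_xpsr() updates only the NZCV and Q flags.
     */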
2577 
2578     if (cur_el == 0 && reg > 7 && reg != 20) {
2579         /*
2580          * only xPSR sub-fields and CONTROL.SFPA may be written by
2581          * unprivileged code
2582          */
2583         return;
2584     }
2585 
2586     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2587         switch (reg) {
2588         case 0x88: /* MSP_NS */
2589             if (!env->v7m.secure) {
2590                 return;
2591             }
2592             env->v7m.other_ss_msp = val & ~3;
2593             return;
2594         case 0x89: /* PSP_NS */
2595             if (!env->v7m.secure) {
2596                 return;
2597             }
2598             env->v7m.other_ss_psp = val & ~3;
2599             return;
2600         case 0x8a: /* MSPLIM_NS */
2601             if (!env->v7m.secure) {
2602                 return;
2603             }
2604             env->v7m.msplim[M_REG_NS] = val & ~7;
2605             return;
2606         case 0x8b: /* PSPLIM_NS */
2607             if (!env->v7m.secure) {
2608                 return;
2609             }
2610             env->v7m.psplim[M_REG_NS] = val & ~7;
2611             return;
2612         case 0x90: /* PRIMASK_NS */
2613             if (!env->v7m.secure) {
2614                 return;
2615             }
2616             env->v7m.primask[M_REG_NS] = val & 1;
2617             return;
2618         case 0x91: /* BASEPRI_NS */
2619             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2620                 goto bad_reg;
2621             }
2622             if (!env->v7m.secure) {
2623                 return;
2624             }
2625             env->v7m.basepri[M_REG_NS] = val & 0xff;
2626             return;
2627         case 0x93: /* FAULTMASK_NS */
2628             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2629                 goto bad_reg;
2630             }
2631             if (!env->v7m.secure) {
2632                 return;
2633             }
2634             env->v7m.faultmask[M_REG_NS] = val & 1;
2635             return;
2636         case 0x94: /* CONTROL_NS */
2637             if (!env->v7m.secure) {
2638                 return;
2639             }
2640             write_v7m_control_spsel_for_secstate(env,
2641                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2642                                                  M_REG_NS);
2643             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2644                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2645                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2646             }
2647             /*
2648              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2649              * RES0 if the FPU is not present, and is stored in the S bank
2650              */
2651             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2652                 extract32(env->v7m.nsacr, 10, 1)) {
2653                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2654                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2655             }
2656             return;
2657         case 0x98: /* SP_NS */
2658         {
2659             /*
2660              * This gives the non-secure SP selected based on whether we're
2661              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2662              */
2663             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2664             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2665             uint32_t limit;
2666 
2667             if (!env->v7m.secure) {
2668                 return;
2669             }
2670 
            limit = is_psp ? env->v7m.psplim[M_REG_NS]
                           : env->v7m.msplim[M_REG_NS];
2672 
2673             val &= ~0x3;
2674 
2675             if (val < limit) {
2676                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2677             }
2678 
2679             if (is_psp) {
2680                 env->v7m.other_ss_psp = val;
2681             } else {
2682                 env->v7m.other_ss_msp = val;
2683             }
2684             return;
2685         }
2686         default:
2687             break;
2688         }
2689     }

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 8: /* MSP */
        if (v7m_using_psp(env)) {
            env->v7m.other_sp = val & ~3;
        } else {
            env->regs[13] = val & ~3;
        }
        break;
    case 9: /* PSP */
        if (v7m_using_psp(env)) {
            env->regs[13] = val & ~3;
        } else {
            env->v7m.other_sp = val & ~3;
        }
        break;
    case 10: /* MSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.msplim[env->v7m.secure] = val & ~7;
        break;
    case 11: /* PSPLIM */
        if (!arm_feature(env, ARM_FEATURE_V8)) {
            goto bad_reg;
        }
        env->v7m.psplim[env->v7m.secure] = val & ~7;
        break;
    case 16: /* PRIMASK */
        env->v7m.primask[env->v7m.secure] = val & 1;
        break;
    case 17: /* BASEPRI */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.basepri[env->v7m.secure] = val & 0xff;
        break;
    case 18: /* BASEPRI_MAX */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        val &= 0xff;
        if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
                         || env->v7m.basepri[env->v7m.secure] == 0)) {
            env->v7m.basepri[env->v7m.secure] = val;
        }
        break;
    case 19: /* FAULTMASK */
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            goto bad_reg;
        }
        env->v7m.faultmask[env->v7m.secure] = val & 1;
        break;
    case 20: /* CONTROL */
        /*
         * Writing to the SPSEL bit only has an effect if we are in
         * thread mode; other bits can be updated by any privileged code.
         * write_v7m_control_spsel() deals with updating the SPSEL bit in
         * env->v7m.control, so we only need to update the others.
         * For v7M we must simply ignore explicit writes to SPSEL in
         * handler mode; for v8M the write is permitted but has no effect.
         * All these bits are write-ignored from non-privileged code,
         * except for SFPA.
         */
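        /*
         * Summary of the write rules implemented below (descriptive
         * comment, added for clarity):
         *   SPSEL: privileged only; ignored in v7M handler mode,
         *          accepted but without effect in v8M handler mode.
         *   nPRIV: privileged only; requires the Main Extension.
         *   SFPA:  Secure state only, but writable even unprivileged.
         *   FPCA:  privileged only; from Non-secure state it also
         *          requires NSACR.CP10 to be set.
         *   SFPA and FPCA are RES0 without an FPU.
         */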
        if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
                           !arm_v7m_is_handler_mode(env))) {
            write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
        }
        if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
            env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
        }
        if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
            /*
             * SFPA is RAZ/WI from NS or if no FPU.
             * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
             * Both are stored in the S bank.
             */
            if (env->v7m.secure) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
            }
            if (cur_el > 0 &&
                (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
                 extract32(env->v7m.nsacr, 10, 1))) {
                env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
                env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
            }
        }
        break;
    default:
    bad_reg:
        qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
                                       " register %d\n", reg);
        return;
    }
}
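
/*
 * Illustrative sketch (not part of the original file): the BASEPRI_MAX
 * case above only ever tightens the priority boost. A hypothetical
 * standalone model of that update rule, for clarity:
 */
#if 0 /* documentation-only example */
static uint8_t basepri_max_update(uint8_t cur, uint8_t val)
{
    /*
     * The write takes effect only if BASEPRI is currently disabled
     * (zero) or the new nonzero value is a numerically lower, i.e.
     * more urgent, priority than the current one.
     */
    if (val != 0 && (val < cur || cur == 0)) {
        return val;
    }
    return cur;
}
#endif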

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /*
     * Work out which security state and privilege level we're
     * interested in...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * The MPU and SAU don't care about the access type for our
     * purposes, beyond the fact that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info is only available to privileged code, or when
     * inspecting the other security state's MPU.
     */
    if (arm_current_el(env) != 0 || alt) {
        GetPhysAddrResult res = {};
        ARMMMUFaultInfo fi = {};

        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, PAGE_READ, mmu_idx,
                          targetsec, &res, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = res.f.prot & PAGE_READ;
        rw = res.f.prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targetsec, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}
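
/*
 * Illustrative sketch (not part of the original file): unpacking the
 * TT response word built above, using the same bit layout as the
 * tt_resp assignment (extract32() is from qemu/bitops.h).
 */
#if 0 /* documentation-only example */
static bool tt_resp_is_readwrite(uint32_t tt_resp)
{
    /*
     * Bit layout: [7:0] MREGION, [15:8] SREGION, [16] MRVALID,
     * [17] SRVALID, [18] R, [19] RW, [20] NSR, [21] NSRW, [22] S,
     * [23] IRVALID, [31:24] IREGION.
     */
    return extract32(tt_resp, 19, 1); /* the RW bit */
}
#endif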

#endif /* !CONFIG_USER_ONLY */

uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                             bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (e.g. by
     * changing the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us in the
     * SPSEL control bit value; this is because we also use this
     * function in handling the pushing of the callee-saves registers
     * as part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the
     * exception-return magic LR value from the previous exception.
     * The pseudocode open-codes the stack selection in
     * PushCalleeStack(), but we prefer to make this utility function
     * generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}
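
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller fetching the Non-secure process stack pointer. Per the comment
 * above, the returned pointer is only valid until the stack pointers
 * are re-banked (e.g. by an SPSEL change).
 */
#if 0 /* documentation-only example */
static uint32_t read_ns_psp(CPUARMState *env)
{
    /* Thread mode with SPSEL set selects the process stack. */
    return *arm_v7m_get_sp_ptr(env, false, true, true);
}
#endif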