/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
#include "gdbstub/helpers.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif

static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}
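
/*
 * Editor's sketch (hypothetical helper, not part of the QEMU API):
 * the low bits of the MRS SYSm encoding select which xPSR views
 * v7m_mrs_xpsr() combines -- bit 0 adds IPSR (privileged reads only),
 * bit 1 selects EPSR (which reads as zero), and a clear bit 2
 * includes APSR.
 */
static G_GNUC_UNUSED uint32_t example_read_xpsr_views(CPUARMState *env)
{
    uint32_t apsr = v7m_mrs_xpsr(env, 0, 1);  /* APSR alone */
    uint32_t iapsr = v7m_mrs_xpsr(env, 1, 1); /* IPSR + APSR */
    uint32_t xpsr = v7m_mrs_xpsr(env, 3, 1);  /* IPSR + EPSR + APSR */

    return apsr | iapsr | xpsr;
}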

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}
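
/*
 * Editor's sketch (hypothetical function; assumes the ARM_MMU_IDX_M_*
 * flags are disjoint bits, as the ORs above rely on): composing all
 * three flags yields the corresponding named M-profile MMU index,
 * e.g. Secure + privileged + negative priority.
 */
static G_GNUC_UNUSED void example_compose_m_mmu_idx(CPUARMState *env)
{
    ARMMMUIdx idx = arm_v7m_mmu_idx_all(env, true, true, true);

    assert(idx == ARMMMUIdx_MSPrivNegPri);
}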

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                         value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}
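
/*
 * Editor's summary of the fault-status bits set by v7m_stack_write()
 * above, by stacking mode (illustrative; mirrors the branches above):
 *   STACK_LAZYFP:           SFSR.LSPERR / CFSR.MLSPERR / BFSR.LSPERR
 *   STACK_NORMAL/IGNFAULTS: SFSR.AUVIOL / CFSR.MSTKERR / BFSR.STKERR
 * STACK_IGNFAULTS updates the status registers but pends no exception.
 */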

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the BQL as we are going to touch the NVIC */
    bql_lock();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    bql_unlock();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}
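
/*
 * Editor's sketch of the lazy-FP frame layout implied by the loop
 * above: s0..s15 at FPCAR + 0x00..0x3c, FPSCR at +0x40, VPR (MVE
 * only) at +0x44, and s16..s31 (when FPCCR.TS is set) from +0x48
 * onwards. Hypothetical helper, illustrative arithmetic only.
 */
static G_GNUC_UNUSED uint32_t example_lazyfp_slot_offset(int sreg)
{
    uint32_t offset = 4 * sreg;   /* s0..s15 at 0x00..0x3c */

    if (sreg >= 16) {
        offset += 8;              /* skip the FPSCR and VPR slots */
    }
    return offset;
}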

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}
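
/*
 * Editor's note on the SP banking invariant relied on above, as a
 * hypothetical helper: of the four M-profile stack pointers, regs[13]
 * always holds the SP in active use, v7m.other_sp the inactive SP of
 * the current security state, and v7m.other_ss_{msp,psp} both SPs of
 * the inactive security state.
 */
static G_GNUC_UNUSED uint32_t example_current_state_msp(CPUARMState *env)
{
    return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
}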

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}
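
/*
 * Editor's sketch (hypothetical predicate; assumes the MIN_MAGIC
 * constants from internals.h bound the two ranges): FNC_RETURN values
 * sit immediately below the EXC_RETURN range, which is why the single
 * lower bound used above covers both when the Security Extension is
 * present.
 */
static G_GNUC_UNUSED bool example_is_fnc_return(uint32_t dest)
{
    return dest >= FNC_RETURN_MIN_MAGIC && dest < EXC_RETURN_MIN_MAGIC;
}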

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}
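
/*
 * Editor's sketch of the two-word frame BLXNS pushes above; the struct
 * is hypothetical and purely illustrative of the layout at the
 * decremented SP.
 */
typedef struct ExampleBlxnsFrame {
    uint32_t ret_addr;  /* [sp + 0]: return address with bit 0 set */
    uint32_t saved_psr; /* [sp + 4]: IPSR exception number, plus SFPA */
} ExampleBlxnsFrame;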

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}
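
/*
 * Editor's sketch (hypothetical helper): the vector fetch address
 * computed above is just the banked VTOR base for the target security
 * state plus 4 bytes per exception number.
 */
static G_GNUC_UNUSED uint32_t example_vector_addr(CPUARMState *env,
                                                  int exc, bool secure)
{
    return env->v7m.vecbase[secure] + exc * 4;
}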

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}
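
/*
 * Editor's note: concretely, the signature is 0xfefa125a when an FP
 * frame accompanies the callee-saves frame (FType clear, FP present)
 * and 0xfefa125b otherwise. A hypothetical sketch of the intended use,
 * comparing a stacked word against the expected value on unstacking:
 */
static G_GNUC_UNUSED bool example_integrity_sig_ok(CPUARMState *env,
                                                   uint32_t lr,
                                                   uint32_t stacked_sig)
{
    return stacked_sig == v7m_integrity_sig(env, lr);
}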

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}
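
/*
 * Editor's sketch of the 0x28-byte callee-saves frame written above;
 * the struct is hypothetical. The word at offset 0x04 is deliberately
 * left unwritten, matching the stores above.
 */
typedef struct ExampleCalleeFrame {
    uint32_t integrity_sig;  /* 0x00: v7m_integrity_sig() value */
    uint32_t reserved;       /* 0x04: not written */
    uint32_t r4_to_r11[8];   /* 0x08..0x24 */
} ExampleCalleeFrame;
QEMU_BUILD_BUG_ON(sizeof(ExampleCalleeFrame) != 0x28);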

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so the caller
     * should ignore further stack faults while trying to process that
     * derived exception).
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}
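
/*
 * Editor's sketch of the basic 0x20-byte hardware-stacked frame built
 * above; the struct is hypothetical. The extended frames append FP
 * state at +0x20 (0x68 bytes total for s0..s15 plus FPSCR/VPR, 0xa8
 * bytes when s16..s31 are stacked as well).
 */
typedef struct ExampleBasicFrame {
    uint32_t r0, r1, r2, r3; /* 0x00..0x0c */
    uint32_t r12;            /* 0x10 */
    uint32_t lr;             /* 0x14 */
    uint32_t ret_addr;       /* 0x18 */
    uint32_t xpsr;           /* 0x1c: with SPREALIGN and SFPA folded in */
} ExampleBasicFrame;
QEMU_BUILD_BUG_ON(sizeof(ExampleBasicFrame) != 0x20);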

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
1380      * doesn't matter.
1381      */
1382     if (!arm_v7m_is_handler_mode(env)) {
1383         return;
1384     }
1385 
1386     /*
1387      * In the spec pseudocode ExceptionReturn() is called directly
1388      * from BXWritePC() and gets the full target PC value including
1389      * bit zero. In QEMU's implementation we treat it as a normal
1390      * jump-to-register (which is then caught later on), and so split
1391      * the target value up between env->regs[15] and env->thumb in
1392      * gen_bx(). Reconstitute it.
1393      */
1394     excret = env->regs[15];
1395     if (env->thumb) {
1396         excret |= 1;
1397     }
1398 
1399     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1400                   " previous exception %d\n",
1401                   excret, env->v7m.exception);
1402 
1403     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1404         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1405                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1406                       excret);
1407     }
1408 
1409     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1410 
1411     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1412         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1413                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1414                       "if FPU not present\n",
1415                       excret);
1416         ftype = true;
1417     }
1418 
1419     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1420         /*
1421          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1422          * we pick which FAULTMASK to clear.
1423          */
1424         if (!env->v7m.secure &&
1425             ((excret & R_V7M_EXCRET_ES_MASK) ||
1426              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1427             sfault = 1;
1428             /* For all other purposes, treat ES as 0 (R_HXSR) */
1429             excret &= ~R_V7M_EXCRET_ES_MASK;
1430         }
1431         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1432     }
1433 
1434     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1435         /*
1436          * Auto-clear FAULTMASK on return from other than NMI.
1437          * If the security extension is implemented then this only
1438          * happens if the raw execution priority is >= 0; the
1439          * value of the ES bit in the exception return value indicates
1440          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1441          */
1442         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1443             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1444                 env->v7m.faultmask[exc_secure] = 0;
1445             }
1446         } else {
1447             env->v7m.faultmask[M_REG_NS] = 0;
1448         }
1449     }
1450 
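         /*
          * Tell the NVIC that this handler is done with the current
          * exception; the return value tells us whether that exception
          * was actually active and whether we are now back at base level.
          */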
1451     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1452                                      exc_secure)) {
1453     case -1:
1454         /* attempt to exit an exception that isn't active */
1455         ufault = true;
1456         break;
1457     case 0:
1458         /* still an irq active now */
1459         break;
1460     case 1:
1461         /*
1462          * We returned to base exception level, no nesting.
1463          * (In the pseudocode this is written using "NestedActivation != 1"
1464          * where we have 'rettobase == false'.)
1465          */
1466         rettobase = true;
1467         break;
1468     default:
1469         g_assert_not_reached();
1470     }
1471 
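         /*
          * Decode the rest of EXC_RETURN: MODE = 0 means return to Handler
          * mode, SPSEL selects the process stack, and S (Security Extension
          * only) selects the Secure background state.
          */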
1472     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1473     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1474     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1475         (excret & R_V7M_EXCRET_S_MASK);
1476 
1477     if (arm_feature(env, ARM_FEATURE_V8)) {
1478         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1479             /*
1480              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1481              * we choose to take the UsageFault.
1482              */
1483             if ((excret & R_V7M_EXCRET_S_MASK) ||
1484                 (excret & R_V7M_EXCRET_ES_MASK) ||
1485                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1486                 ufault = true;
1487             }
1488         }
1489         if (excret & R_V7M_EXCRET_RES0_MASK) {
1490             ufault = true;
1491         }
1492     } else {
1493         /* For v7M we only recognize certain combinations of the low bits */
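             /* (e.g. EXC_RETURN 0xFFFFFFF1, 0xFFFFFFF9 or 0xFFFFFFFD) */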
1494         switch (excret & 0xf) {
1495         case 1: /* Return to Handler */
1496             break;
1497         case 13: /* Return to Thread using Process stack */
1498         case 9: /* Return to Thread using Main stack */
1499             /*
1500              * We only need to check NONBASETHRDENA for v7M, because in
1501              * v8M this bit does not exist (it is RES1).
1502              */
1503             if (!rettobase &&
1504                 !(env->v7m.ccr[env->v7m.secure] &
1505                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1506                 ufault = true;
1507             }
1508             break;
1509         default:
1510             ufault = true;
1511         }
1512     }
1513 
1514     /*
1515      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1516      * Handler mode (and will be until we write the new XPSR.Interrupt
1517      * field) this does not switch around the current stack pointer.
1518      * We must do this before we do any kind of tailchaining, including
1519      * for the derived exceptions on integrity check failures, or we will
1520      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1521      */
1522     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1523 
1524     /*
1525      * Clear scratch FP values left in caller saved registers; this
1526      * must happen before any kind of tail chaining.
1527      */
1528     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1529         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1530         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1531             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1532             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1533             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1534                           "stackframe: error during lazy state deactivation\n");
1535             v7m_exception_taken(cpu, excret, true, false);
1536             return;
1537         } else {
1538             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1539                 /* v8.1M adds this NOCP check */
1540                 bool nsacr_pass = exc_secure ||
1541                     extract32(env->v7m.nsacr, 10, 1);
1542                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1543                 if (!nsacr_pass) {
1544                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1545                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1546                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1547                         "stackframe: NSACR prevents clearing FPU registers\n");
1548                     v7m_exception_taken(cpu, excret, true, false);
1549                     return;
1550                 } else if (!cpacr_pass) {
1551                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1552                                             exc_secure);
1553                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1554                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1555                         "stackframe: CPACR prevents clearing FPU registers\n");
1556                     v7m_exception_taken(cpu, excret, true, false);
1557                     return;
1558                 }
1559             }
1560             /* Clear s0..s15, FPSCR and VPR */
1561             int i;
1562 
1563             for (i = 0; i < 16; i += 2) {
1564                 *aa32_vfp_dreg(env, i / 2) = 0;
1565             }
1566             vfp_set_fpscr(env, 0);
1567             if (cpu_isar_feature(aa32_mve, cpu)) {
1568                 env->v7m.vpr = 0;
1569             }
1570         }
1571     }
1572 
1573     if (sfault) {
1574         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1575         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1576         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1577                       "stackframe: failed EXC_RETURN.ES validity check\n");
1578         v7m_exception_taken(cpu, excret, true, false);
1579         return;
1580     }
1581 
1582     if (ufault) {
1583         /*
1584          * Bad exception return: instead of popping the exception
1585          * stack, directly take a usage fault on the current stack.
1586          */
1587         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1588         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1589         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1590                       "stackframe: failed exception return integrity check\n");
1591         v7m_exception_taken(cpu, excret, true, false);
1592         return;
1593     }
1594 
1595     /*
1596      * Tailchaining: if there is currently a pending exception that
1597      * is high enough priority to preempt execution at the level we're
1598      * about to return to, then just directly take that exception now,
1599      * avoiding an unstack-and-then-stack. Note that now we have
1600      * deactivated the previous exception by calling armv7m_nvic_complete_irq()
1601      * our current execution priority is already the execution priority we are
1602      * returning to -- none of the state we would unstack or set based on
1603      * the EXCRET value affects it.
1604      */
1605     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1606         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1607         v7m_exception_taken(cpu, excret, true, false);
1608         return;
1609     }
1610 
1611     switch_v7m_security_state(env, return_to_secure);
1612 
1613     {
1614         /*
1615          * The stack pointer we should be reading the exception frame from
1616          * depends on bits in the magic exception return type value (and
1617          * for v8M isn't necessarily the stack pointer we will eventually
1618          * end up resuming execution with). Get a pointer to the location
1619          * in the CPU state struct where the SP we need is currently being
1620          * stored; we will use and modify it in place.
1621          * We use this limited C variable scope so we don't accidentally
1622          * use 'frame_sp_p' after we do something that makes it invalid.
1623          */
1624         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1625         uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
1626                                                   !return_to_handler, spsel);
1627         uint32_t frameptr = *frame_sp_p;
1628         bool pop_ok = true;
1629         ARMMMUIdx mmu_idx;
1630         bool return_to_priv = return_to_handler ||
1631             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1632 
1633         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1634                                                         return_to_priv);
1635 
1636         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1637             arm_feature(env, ARM_FEATURE_V8)) {
1638             qemu_log_mask(LOG_GUEST_ERROR,
1639                           "M profile exception return with non-8-aligned SP "
1640                           "for destination state is UNPREDICTABLE\n");
1641         }
1642 
1643         /* Do we need to pop callee-saved registers? */
1644         if (return_to_secure &&
1645             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1646              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1647             uint32_t actual_sig;
1648 
1649             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1650 
1651             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1652                 /* Take a SecureFault on the current stack */
1653                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1654                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1655                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1656                               "stackframe: failed exception return integrity "
1657                               "signature check\n");
1658                 v7m_exception_taken(cpu, excret, true, false);
1659                 return;
1660             }
1661 
1662             pop_ok = pop_ok &&
1663                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1664                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1665                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1666                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1667                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1668                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1669                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1670                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1671 
1672             frameptr += 0x28;
1673         }
1674 
1675         /* Pop the basic frame: r0-r3, r12, lr, pc and xPSR */
1676         pop_ok = pop_ok &&
1677             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1678             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1679             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1680             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1681             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1682             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1683             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1684             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1685 
1686         if (!pop_ok) {
1687             /*
1688              * v7m_stack_read() pended a fault, so take it (as a tail
1689              * chained exception on the same stack frame)
1690              */
1691             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1692             v7m_exception_taken(cpu, excret, true, false);
1693             return;
1694         }
1695 
1696         /*
1697          * Returning from an exception with a PC with bit 0 set is defined
1698          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1699          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1700          * the lsbit, and there are several RTOSes out there which incorrectly
1701          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1702          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1703          * complain about the badly behaved guest.
1704          */
1705         if (env->regs[15] & 1) {
1706             env->regs[15] &= ~1U;
1707             if (!arm_feature(env, ARM_FEATURE_V8)) {
1708                 qemu_log_mask(LOG_GUEST_ERROR,
1709                               "M profile return from interrupt with misaligned "
1710                               "PC is UNPREDICTABLE on v7M\n");
1711             }
1712         }
1713 
1714         if (arm_feature(env, ARM_FEATURE_V8)) {
1715             /*
1716              * For v8M we have to check whether the xPSR exception field
1717              * matches the EXCRET value for return to handler/thread
1718              * before we commit to changing the SP and xPSR.
1719              */
1720             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1721             if (return_to_handler != will_be_handler) {
1722                 /*
1723                  * Take an INVPC UsageFault on the current stack.
1724                  * By this point we will have switched to the security state
1725                  * for the background state, so this UsageFault will target
1726                  * that state.
1727                  */
1728                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1729                                         env->v7m.secure);
1730                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1731                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1732                               "stackframe: failed exception return integrity "
1733                               "check\n");
1734                 v7m_exception_taken(cpu, excret, true, false);
1735                 return;
1736             }
1737         }
1738 
1739         if (!ftype) {
1740             /* FP present and we need to handle it */
1741             if (!return_to_secure &&
1742                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1743                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1744                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1745                 qemu_log_mask(CPU_LOG_INT,
1746                               "...taking SecureFault on existing stackframe: "
1747                               "Secure LSPACT set but exception return is "
1748                               "not to secure state\n");
1749                 v7m_exception_taken(cpu, excret, true, false);
1750                 return;
1751             }
1752 
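             /*
              * If FPCCR_S.TS is set, the extended frame also holds the
              * callee-saved FP regs s16-s31, which must be restored on a
              * return to Secure state.
              */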
1753             restore_s16_s31 = return_to_secure &&
1754                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1755 
1756             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1757                 /* State in FPU is still valid, just clear LSPACT */
1758                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1759             } else {
1760                 int i;
1761                 uint32_t fpscr;
1762                 bool cpacr_pass, nsacr_pass;
1763 
1764                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1765                                             return_to_priv);
1766                 nsacr_pass = return_to_secure ||
1767                     extract32(env->v7m.nsacr, 10, 1);
1768 
1769                 if (!cpacr_pass) {
1770                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1771                                             return_to_secure);
1772                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1773                     qemu_log_mask(CPU_LOG_INT,
1774                                   "...taking UsageFault on existing "
1775                                   "stackframe: CPACR.CP10 prevents unstacking "
1776                                   "FP regs\n");
1777                     v7m_exception_taken(cpu, excret, true, false);
1778                     return;
1779                 } else if (!nsacr_pass) {
1780                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1781                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1782                     qemu_log_mask(CPU_LOG_INT,
1783                                   "...taking Secure UsageFault on existing "
1784                                   "stackframe: NSACR.CP10 prevents unstacking "
1785                                   "FP regs\n");
1786                     v7m_exception_taken(cpu, excret, true, false);
1787                     return;
1788                 }
1789 
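                 /*
                  * Extended frame layout: s0-s15 at frameptr + 0x20..0x5c,
                  * FPSCR at +0x60, VPR at +0x64, then s16-s31 from +0x68
                  * when restore_s16_s31 applies.
                  */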
1790                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1791                     uint32_t slo, shi;
1792                     uint64_t dn;
1793                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1794 
1795                     if (i >= 16) {
1796                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1797                     }
1798 
1799                     pop_ok = pop_ok &&
1800                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1801                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1802 
1803                     if (!pop_ok) {
1804                         break;
1805                     }
1806 
1807                     dn = (uint64_t)shi << 32 | slo;
1808                     *aa32_vfp_dreg(env, i / 2) = dn;
1809                 }
1810                 pop_ok = pop_ok &&
1811                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1812                 if (pop_ok) {
1813                     vfp_set_fpscr(env, fpscr);
1814                 }
1815                 if (cpu_isar_feature(aa32_mve, cpu)) {
1816                     pop_ok = pop_ok &&
1817                         v7m_stack_read(cpu, &env->v7m.vpr,
1818                                        frameptr + 0x64, mmu_idx);
1819                 }
1820                 if (!pop_ok) {
1821                     /*
1822                      * These regs are 0 if security extension present;
1823                      * otherwise merely UNKNOWN. We always zero them.
1824                      */
1825                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1826                         *aa32_vfp_dreg(env, i / 2) = 0;
1827                     }
1828                     vfp_set_fpscr(env, 0);
1829                     if (cpu_isar_feature(aa32_mve, cpu)) {
1830                         env->v7m.vpr = 0;
1831                     }
1832                 }
1833             }
1834         }
1835         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1836                                                V7M_CONTROL, FPCA, !ftype);
1837 
1838         /* Commit to consuming the stack frame */
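             /*
              * Basic frame is 8 words; the FP extension adds 18 more
              * (s0-s15, FPSCR, VPR/reserved) and restoring s16-s31 adds
              * a further 16.
              */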
1839         frameptr += 0x20;
1840         if (!ftype) {
1841             frameptr += 0x48;
1842             if (restore_s16_s31) {
1843                 frameptr += 0x40;
1844             }
1845         }
1846         /*
1847          * Undo stack alignment (the SPREALIGN bit indicates that the original
1848          * pre-exception SP was not 8-aligned and we added a padding word to
1849          * align it, so we undo this by ORing in the bit that increases it
1850          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1851          * would work too but a logical OR is how the pseudocode specifies it.)
1852          */
1853         if (xpsr & XPSR_SPREALIGN) {
1854             frameptr |= 4;
1855         }
1856         *frame_sp_p = frameptr;
1857     }
1858 
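         /*
          * Restore xPSR from the popped value, except for the bits we
          * consumed above (SPREALIGN), handle specially below (SFPA), or
          * which don't exist without the DSP extension (GE).
          */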
1859     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1860     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1861         xpsr_mask &= ~XPSR_GE;
1862     }
1863     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1864     xpsr_write(env, xpsr, xpsr_mask);
1865 
1866     if (env->v7m.secure) {
1867         bool sfpa = xpsr & XPSR_SFPA;
1868 
1869         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1870                                                V7M_CONTROL, SFPA, sfpa);
1871     }
1872 
1873     /*
1874      * The restored xPSR exception field will be zero if we're
1875      * resuming in Thread mode. If that doesn't match what the
1876      * exception return excret specified then this is a UsageFault.
1877      * v7M requires we make this check here; v8M did it earlier.
1878      */
1879     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1880         /*
1881          * Take an INVPC UsageFault by pushing the stack again;
1882          * we know we're v7M so this is never a Secure UsageFault.
1883          */
1884         bool ignore_stackfaults;
1885 
1886         assert(!arm_feature(env, ARM_FEATURE_V8));
1887         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1888         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1889         ignore_stackfaults = v7m_push_stack(cpu);
1890         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1891                       "failed exception return integrity check\n");
1892         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1893         return;
1894     }
1895 
1896     /* Otherwise, we have a successful exception exit. */
1897     arm_clear_exclusive(env);
1898     arm_rebuild_hflags(env);
1899     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1900 }
1901 
1902 static bool do_v7m_function_return(ARMCPU *cpu)
1903 {
1904     /*
1905      * v8M security extensions magic function return.
1906      * We may either:
1907      *  (1) throw an exception (longjump)
1908      *  (2) return true if we successfully handled the function return
1909      *  (3) return false if we failed a consistency check and have
1910      *      pended a UsageFault that needs to be taken now
1911      *
1912      * At this point the magic return value is split between env->regs[15]
1913      * and env->thumb. We don't bother to reconstitute it because we don't
1914      * need it (all values are handled the same way).
1915      */
1916     CPUARMState *env = &cpu->env;
1917     uint32_t newpc, newpsr, newpsr_exc;
1918 
1919     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1920 
1921     {
1922         bool threadmode, spsel;
1923         MemOpIdx oi;
1924         ARMMMUIdx mmu_idx;
1925         uint32_t *frame_sp_p;
1926         uint32_t frameptr;
1927 
1928         /* Pull the return address and IPSR from the Secure stack */
1929         threadmode = !arm_v7m_is_handler_mode(env);
1930         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1931 
1932         frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
1933         frameptr = *frame_sp_p;
1934 
1935         /*
1936          * These loads may throw an exception (for MPU faults). We want to
1937          * do them as secure, so work out what MMU index that is.
1938          */
1939         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1940         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1941         newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
1942         newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
1943 
1944         /* Consistency checks on new IPSR */
1945         newpsr_exc = newpsr & XPSR_EXCP;
1946         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1947               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1948             /* Pend the fault and tell our caller to take it */
1949             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1950             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1951                                     env->v7m.secure);
1952             qemu_log_mask(CPU_LOG_INT,
1953                           "...taking INVPC UsageFault: "
1954                           "IPSR consistency check failed\n");
1955             return false;
1956         }
1957 
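             /*
              * The function-return frame is just these two words (return
              * address and partial xPSR); consume it.
              */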
1958         *frame_sp_p = frameptr + 8;
1959     }
1960 
1961     /* This invalidates frame_sp_p */
1962     switch_v7m_security_state(env, true);
1963     env->v7m.exception = newpsr_exc;
1964     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1965     if (newpsr & XPSR_SFPA) {
1966         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1967     }
1968     xpsr_write(env, 0, XPSR_IT);
1969     env->thumb = newpc & 1;
1970     env->regs[15] = newpc & ~1;
1971     arm_rebuild_hflags(env);
1972 
1973     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1974     return true;
1975 }
1976 
1977 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
1978                                uint32_t addr, uint16_t *insn)
1979 {
1980     /*
1981      * Load a 16-bit portion of a v7M instruction, returning true on success,
1982      * or false on failure (in which case we will have pended the appropriate
1983      * exception).
1984      * We need to do the instruction fetch's MPU and SAU checks
1985      * like this because there is no MMU index that would allow
1986      * doing the load with a single function call. Instead we must
1987      * first check that the security attributes permit the load
1988      * and that they don't mismatch on the two halves of the instruction,
1989      * and then we do the load as a secure load (ie using the security
1990      * attributes of the address, not the CPU, as architecturally required).
1991      */
1992     CPUState *cs = CPU(cpu);
1993     CPUARMState *env = &cpu->env;
1994     V8M_SAttributes sattrs = {};
1995     GetPhysAddrResult res = {};
1996     ARMMMUFaultInfo fi = {};
1997     MemTxResult txres;
1998 
1999     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
2000     if (!sattrs.nsc || sattrs.ns) {
2001         /*
2002          * This must be the second half of the insn, and it straddles a
2003          * region boundary with the second half not being S&NSC.
2004          */
2005         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2006         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2007         qemu_log_mask(CPU_LOG_INT,
2008                       "...really SecureFault with SFSR.INVEP\n");
2009         return false;
2010     }
2011     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
2012         /* the MPU lookup failed */
2013         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2014         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2015         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2016         return false;
2017     }
2018     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2019                                   res.f.phys_addr, res.f.attrs, &txres);
2020     if (txres != MEMTX_OK) {
2021         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2022         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2023         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2024         return false;
2025     }
2026     return true;
2027 }
2028 
2029 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2030                                    uint32_t addr, uint32_t *spdata)
2031 {
2032     /*
2033      * Read a word of data from the stack for the SG instruction,
2034      * writing the value into *spdata. If the load succeeds, return
2035      * true; otherwise pend an appropriate exception and return false.
2036      * (We can't use data load helpers here that throw an exception
2037      * because of the context we're called in, which is halfway through
2038      * arm_v7m_cpu_do_interrupt().)
2039      */
2040     CPUState *cs = CPU(cpu);
2041     CPUARMState *env = &cpu->env;
2042     MemTxResult txres;
2043     GetPhysAddrResult res = {};
2044     ARMMMUFaultInfo fi = {};
2045     uint32_t value;
2046 
2047     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
2048         /* MPU/SAU lookup failed */
2049         if (fi.type == ARMFault_QEMU_SFault) {
2050             qemu_log_mask(CPU_LOG_INT,
2051                           "...SecureFault during stack word read\n");
2052             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2053             env->v7m.sfar = addr;
2054             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2055         } else {
2056             qemu_log_mask(CPU_LOG_INT,
2057                           "...MemManageFault during stack word read\n");
2058             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2059                 R_V7M_CFSR_MMARVALID_MASK;
2060             env->v7m.mmfar[M_REG_S] = addr;
2061             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2062         }
2063         return false;
2064     }
2065     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2066                               res.f.phys_addr, res.f.attrs, &txres);
2067     if (txres != MEMTX_OK) {
2068         /* BusFault trying to read the data */
2069         qemu_log_mask(CPU_LOG_INT,
2070                       "...BusFault during stack word read\n");
2071         env->v7m.cfsr[M_REG_NS] |=
2072             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2073         env->v7m.bfar = addr;
2074         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2075         return false;
2076     }
2077 
2078     *spdata = value;
2079     return true;
2080 }
2081 
2082 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2083 {
2084     /*
2085      * Check whether this attempt to execute code in a Secure & NS-Callable
2086      * memory region is for an SG instruction; if so, then emulate the
2087      * effect of the SG instruction and return true. Otherwise pend
2088      * the correct kind of exception and return false.
2089      */
2090     CPUARMState *env = &cpu->env;
2091     ARMMMUIdx mmu_idx;
2092     uint16_t insn;
2093 
2094     /*
2095      * We should never get here unless get_phys_addr_pmsav8() caused
2096      * an exception for NS executing in S&NSC memory.
2097      */
2098     assert(!env->v7m.secure);
2099     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2100 
2101     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2102     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2103 
2104     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2105         return false;
2106     }
2107 
2108     if (!env->thumb) {
2109         goto gen_invep;
2110     }
2111 
2112     if (insn != 0xe97f) {
2113         /*
2114          * Not an SG instruction first half (we choose the IMPDEF
2115          * early-SG-check option).
2116          */
2117         goto gen_invep;
2118     }
2119 
2120     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2121         return false;
2122     }
2123 
2124     if (insn != 0xe97f) {
2125         /*
2126          * Not an SG instruction second half (yes, both halves of the SG
2127          * insn have the same hex value)
2128          */
2129         goto gen_invep;
2130     }
2131 
2132     /*
2133      * OK, we have confirmed that we really have an SG instruction.
2134      * We know we're NS in S memory so don't need to repeat those checks.
2135      */
2136     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2137                   ", executing it\n", env->regs[15]);
2138 
2139     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2140         !arm_v7m_is_handler_mode(env)) {
2141         /*
2142          * v8.1M exception stack frame integrity check. Note that we
2143          * must perform the memory access even if CCR_S.TRD is zero
2144          * and we aren't going to check what the data loaded is.
2145          */
2146         uint32_t spdata, sp;
2147 
2148         /*
2149          * We know we are currently NS, so the S stack pointers must be
2150          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2151          */
2152         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2153         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2154             /* Stack access failed and an exception has been pended */
2155             return false;
2156         }
2157 
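             /*
              * With CCR_S.TRD set, the SG is only acceptable if the word
              * at the Secure SP does not look like an exception-frame
              * integrity signature and Secure Thread mode is unprivileged.
              */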
2158         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2159             if (((spdata & ~1) == 0xfefa125a) ||
2160                 !(env->v7m.control[M_REG_S] & 1)) {
2161                 goto gen_invep;
2162             }
2163         }
2164     }
2165 
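         /*
          * Execute the SG: clear LR bit 0 to record that the caller was
          * Non-secure, clear CONTROL_S.SFPA, switch to Secure state, clear
          * the IT bits and step the PC past the 4-byte instruction.
          */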
2166     env->regs[14] &= ~1;
2167     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2168     switch_v7m_security_state(env, true);
2169     xpsr_write(env, 0, XPSR_IT);
2170     env->regs[15] += 4;
2171     arm_rebuild_hflags(env);
2172     return true;
2173 
2174 gen_invep:
2175     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2176     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2177     qemu_log_mask(CPU_LOG_INT,
2178                   "...really SecureFault with SFSR.INVEP\n");
2179     return false;
2180 }
2181 
2182 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2183 {
2184     ARMCPU *cpu = ARM_CPU(cs);
2185     CPUARMState *env = &cpu->env;
2186     uint32_t lr;
2187     bool ignore_stackfaults;
2188 
2189     arm_log_exception(cs);
2190 
2191     /*
2192      * For exceptions we just mark as pending on the NVIC, and let that
2193      * handle it.
2194      */
2195     switch (cs->exception_index) {
2196     case EXCP_UDEF:
2197         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2198         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2199         break;
2200     case EXCP_NOCP:
2201     {
2202         /*
2203          * NOCP might be directed to something other than the current
2204          * security state if this fault is because of NSACR; we indicate
2205          * the target security state using exception.target_el.
2206          */
2207         int target_secstate;
2208 
2209         if (env->exception.target_el == 3) {
2210             target_secstate = M_REG_S;
2211         } else {
2212             target_secstate = env->v7m.secure;
2213         }
2214         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2215         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2216         break;
2217     }
2218     case EXCP_INVSTATE:
2219         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2220         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2221         break;
2222     case EXCP_STKOF:
2223         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2224         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2225         break;
2226     case EXCP_LSERR:
2227         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2228         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2229         break;
2230     case EXCP_UNALIGNED:
2231         /* Unaligned faults reported by M-profile aware code */
2232         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2233         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2234         break;
2235     case EXCP_DIVBYZERO:
2236         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2237         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2238         break;
2239     case EXCP_SWI:
2240         /* The PC already points to the next instruction.  */
2241         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2242         break;
2243     case EXCP_PREFETCH_ABORT:
2244     case EXCP_DATA_ABORT:
2245         /*
2246          * Note that for M profile we don't have a guest facing FSR, but
2247          * the env->exception.fsr will be populated by the code that
2248          * raises the fault, in the A profile short-descriptor format.
2249          *
2250          * Log the exception.vaddress now regardless of subtype, because
2251          * logging below only logs it when it goes into a guest visible
2252          * register.
2253          */
2254         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2255                       (uint32_t)env->exception.vaddress);
2256         switch (env->exception.fsr & 0xf) {
2257         case M_FAKE_FSR_NSC_EXEC:
2258             /*
2259              * Exception generated when we try to execute code at an address
2260              * which is marked as Secure & Non-Secure Callable and the CPU
2261              * is in the Non-Secure state. The only instruction which can
2262              * be executed like this is SG (and that only if both halves of
2263              * the SG instruction have the same security attributes.)
2264              * Everything else must generate an INVEP SecureFault, so we
2265              * emulate the SG instruction here.
2266              */
2267             if (v7m_handle_execute_nsc(cpu)) {
2268                 return;
2269             }
2270             break;
2271         case M_FAKE_FSR_SFAULT:
2272             /*
2273              * Various flavours of SecureFault for attempts to execute or
2274              * access data in the wrong security state.
2275              */
2276             switch (cs->exception_index) {
2277             case EXCP_PREFETCH_ABORT:
2278                 if (env->v7m.secure) {
2279                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2280                     qemu_log_mask(CPU_LOG_INT,
2281                                   "...really SecureFault with SFSR.INVTRAN\n");
2282                 } else {
2283                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2284                     qemu_log_mask(CPU_LOG_INT,
2285                                   "...really SecureFault with SFSR.INVEP\n");
2286                 }
2287                 break;
2288             case EXCP_DATA_ABORT:
2289                 /* This must be an NS access to S memory */
2290                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2291                 qemu_log_mask(CPU_LOG_INT,
2292                               "...really SecureFault with SFSR.AUVIOL\n");
2293                 break;
2294             }
2295             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2296             break;
2297         case 0x8: /* External Abort */
2298             switch (cs->exception_index) {
2299             case EXCP_PREFETCH_ABORT:
2300                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2301                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2302                 break;
2303             case EXCP_DATA_ABORT:
2304                 env->v7m.cfsr[M_REG_NS] |=
2305                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2306                 env->v7m.bfar = env->exception.vaddress;
2307                 qemu_log_mask(CPU_LOG_INT,
2308                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2309                               env->v7m.bfar);
2310                 break;
2311             }
2312             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2313             break;
2314         case 0x1: /* Alignment fault reported by generic code */
2315             qemu_log_mask(CPU_LOG_INT,
2316                           "...really UsageFault with UFSR.UNALIGNED\n");
2317             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2318             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2319                                     env->v7m.secure);
2320             break;
2321         default:
2322             /*
2323              * All other FSR values are either MPU faults or "can't happen
2324              * for M profile" cases.
2325              */
2326             switch (cs->exception_index) {
2327             case EXCP_PREFETCH_ABORT:
2328                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2329                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2330                 break;
2331             case EXCP_DATA_ABORT:
2332                 env->v7m.cfsr[env->v7m.secure] |=
2333                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2334                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2335                 qemu_log_mask(CPU_LOG_INT,
2336                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2337                               env->v7m.mmfar[env->v7m.secure]);
2338                 break;
2339             }
2340             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2341                                     env->v7m.secure);
2342             break;
2343         }
2344         break;
2345     case EXCP_SEMIHOST:
2346         qemu_log_mask(CPU_LOG_INT,
2347                       "...handling as semihosting call 0x%x\n",
2348                       env->regs[0]);
2349 #ifdef CONFIG_TCG
2350         do_common_semihosting(cs);
2351 #else
2352         g_assert_not_reached();
2353 #endif
2354         env->regs[15] += env->thumb ? 2 : 4;
2355         return;
2356     case EXCP_BKPT:
2357         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2358         break;
2359     case EXCP_IRQ:
2360         break;
2361     case EXCP_EXCEPTION_EXIT:
2362         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2363             /* Must be v8M security extension function return */
2364             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2365             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2366             if (do_v7m_function_return(cpu)) {
2367                 return;
2368             }
2369         } else {
2370             do_v7m_exception_exit(cpu);
2371             return;
2372         }
2373         break;
2374     case EXCP_LAZYFP:
2375         /*
2376          * We already pended the specific exception in the NVIC in the
2377          * v7m_preserve_fp_state() helper function.
2378          */
2379         break;
2380     default:
2381         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2382         return; /* Never happens.  Keep compiler happy.  */
2383     }
2384 
2385     if (arm_feature(env, ARM_FEATURE_V8)) {
2386         lr = R_V7M_EXCRET_RES1_MASK |
2387             R_V7M_EXCRET_DCRS_MASK;
2388         /*
2389          * The S bit indicates whether we should return to Secure
2390          * or NonSecure (ie our current state).
2391          * The ES bit indicates whether we're taking this exception
2392          * to Secure or NonSecure (ie our target state). We set it
2393          * later, in v7m_exception_taken().
2394          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2395          * This corresponds to the ARM ARM pseudocode for v8M setting
2396          * some LR bits in PushStack() and some in ExceptionTaken();
2397          * the distinction matters for the tailchain cases where we
2398          * can take an exception without pushing the stack.
2399          */
2400         if (env->v7m.secure) {
2401             lr |= R_V7M_EXCRET_S_MASK;
2402         }
2403     } else {
2404         lr = R_V7M_EXCRET_RES1_MASK |
2405             R_V7M_EXCRET_S_MASK |
2406             R_V7M_EXCRET_DCRS_MASK |
2407             R_V7M_EXCRET_ES_MASK;
2408         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2409             lr |= R_V7M_EXCRET_SPSEL_MASK;
2410         }
2411     }
2412     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2413         lr |= R_V7M_EXCRET_FTYPE_MASK;
2414     }
2415     if (!arm_v7m_is_handler_mode(env)) {
2416         lr |= R_V7M_EXCRET_MODE_MASK;
2417     }
2418 
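         /*
          * Push the context frame and take the exception; v7m_push_stack()
          * returns true if it raised a derived exception, in which case
          * stack faults while taking that exception are ignored.
          */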
2419     ignore_stackfaults = v7m_push_stack(cpu);
2420     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2421 }
2422 
2423 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2424 {
2425     unsigned el = arm_current_el(env);
2426 
2427     /* First handle registers which unprivileged can read */
2428     switch (reg) {
2429     case 0 ... 7: /* xPSR sub-fields */
2430         return v7m_mrs_xpsr(env, reg, el);
2431     case 20: /* CONTROL */
2432         return arm_v7m_mrs_control(env, env->v7m.secure);
2433     case 0x94: /* CONTROL_NS */
2434         /*
2435          * We have to handle this here because unprivileged Secure code
2436          * can read the NS CONTROL register.
2437          */
2438         if (!env->v7m.secure) {
2439             return 0;
2440         }
2441         return env->v7m.control[M_REG_NS] |
2442             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2443     }
2444 
2445     if (el == 0) {
2446         return 0; /* unprivileged reads others as zero */
2447     }
2448 
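         /*
          * SYSm values with bit 7 set (0x88 and up) are the Security
          * Extension's aliases of the Non-secure banked registers; they
          * read as zero from Non-secure state.
          */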
2449     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2450         switch (reg) {
2451         case 0x88: /* MSP_NS */
2452             if (!env->v7m.secure) {
2453                 return 0;
2454             }
2455             return env->v7m.other_ss_msp;
2456         case 0x89: /* PSP_NS */
2457             if (!env->v7m.secure) {
2458                 return 0;
2459             }
2460             return env->v7m.other_ss_psp;
2461         case 0x8a: /* MSPLIM_NS */
2462             if (!env->v7m.secure) {
2463                 return 0;
2464             }
2465             return env->v7m.msplim[M_REG_NS];
2466         case 0x8b: /* PSPLIM_NS */
2467             if (!env->v7m.secure) {
2468                 return 0;
2469             }
2470             return env->v7m.psplim[M_REG_NS];
2471         case 0x90: /* PRIMASK_NS */
2472             if (!env->v7m.secure) {
2473                 return 0;
2474             }
2475             return env->v7m.primask[M_REG_NS];
2476         case 0x91: /* BASEPRI_NS */
2477             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2478                 goto bad_reg;
2479             }
2480             if (!env->v7m.secure) {
2481                 return 0;
2482             }
2483             return env->v7m.basepri[M_REG_NS];
2484         case 0x93: /* FAULTMASK_NS */
2485             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2486                 goto bad_reg;
2487             }
2488             if (!env->v7m.secure) {
2489                 return 0;
2490             }
2491             return env->v7m.faultmask[M_REG_NS];
2492         case 0x98: /* SP_NS */
2493         {
2494             /*
2495              * This gives the non-secure SP selected based on whether we're
2496              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2497              */
2498             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2499 
2500             if (!env->v7m.secure) {
2501                 return 0;
2502             }
2503             if (!arm_v7m_is_handler_mode(env) && spsel) {
2504                 return env->v7m.other_ss_psp;
2505             } else {
2506                 return env->v7m.other_ss_msp;
2507             }
2508         }
2509         default:
2510             break;
2511         }
2512     }
2513 
2514     switch (reg) {
2515     case 8: /* MSP */
2516         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2517     case 9: /* PSP */
2518         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2519     case 10: /* MSPLIM */
2520         if (!arm_feature(env, ARM_FEATURE_V8)) {
2521             goto bad_reg;
2522         }
2523         return env->v7m.msplim[env->v7m.secure];
2524     case 11: /* PSPLIM */
2525         if (!arm_feature(env, ARM_FEATURE_V8)) {
2526             goto bad_reg;
2527         }
2528         return env->v7m.psplim[env->v7m.secure];
2529     case 16: /* PRIMASK */
2530         return env->v7m.primask[env->v7m.secure];
2531     case 17: /* BASEPRI */
2532     case 18: /* BASEPRI_MAX */
2533         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2534             goto bad_reg;
2535         }
2536         return env->v7m.basepri[env->v7m.secure];
2537     case 19: /* FAULTMASK */
2538         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2539             goto bad_reg;
2540         }
2541         return env->v7m.faultmask[env->v7m.secure];
2542     default:
2543     bad_reg:
2544         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2545                                        " register %d\n", reg);
2546         return 0;
2547     }
2548 }
2549 
2550 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2551 {
2552     /*
2553      * We're passed bits [11..0] of the instruction; extract
2554      * SYSm and the mask bits.
2555      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2556      * we choose to treat them as if the mask bits were valid.
2557      * NB that the pseudocode 'mask' variable is bits [11..10],
2558      * whereas ours is [11..8].
2559      */
2560     uint32_t mask = extract32(maskreg, 8, 4);
2561     uint32_t reg = extract32(maskreg, 0, 8);
2562     int cur_el = arm_current_el(env);
2563 
2564     if (cur_el == 0 && reg > 7 && reg != 20) {
2565         /*
2566          * only xPSR sub-fields and CONTROL.SFPA may be written by
2567          * unprivileged code
2568          */
2569         return;
2570     }
2571 
2572     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2573         switch (reg) {
2574         case 0x88: /* MSP_NS */
2575             if (!env->v7m.secure) {
2576                 return;
2577             }
2578             env->v7m.other_ss_msp = val & ~3;
2579             return;
2580         case 0x89: /* PSP_NS */
2581             if (!env->v7m.secure) {
2582                 return;
2583             }
2584             env->v7m.other_ss_psp = val & ~3;
2585             return;
2586         case 0x8a: /* MSPLIM_NS */
2587             if (!env->v7m.secure) {
2588                 return;
2589             }
2590             env->v7m.msplim[M_REG_NS] = val & ~7;
2591             return;
2592         case 0x8b: /* PSPLIM_NS */
2593             if (!env->v7m.secure) {
2594                 return;
2595             }
2596             env->v7m.psplim[M_REG_NS] = val & ~7;
2597             return;
2598         case 0x90: /* PRIMASK_NS */
2599             if (!env->v7m.secure) {
2600                 return;
2601             }
2602             env->v7m.primask[M_REG_NS] = val & 1;
2603             return;
2604         case 0x91: /* BASEPRI_NS */
2605             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2606                 goto bad_reg;
2607             }
2608             if (!env->v7m.secure) {
2609                 return;
2610             }
2611             env->v7m.basepri[M_REG_NS] = val & 0xff;
2612             return;
2613         case 0x93: /* FAULTMASK_NS */
2614             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2615                 goto bad_reg;
2616             }
2617             if (!env->v7m.secure) {
2618                 return;
2619             }
2620             env->v7m.faultmask[M_REG_NS] = val & 1;
2621             return;
2622         case 0x94: /* CONTROL_NS */
2623             if (!env->v7m.secure) {
2624                 return;
2625             }
2626             write_v7m_control_spsel_for_secstate(env,
2627                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2628                                                  M_REG_NS);
2629             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2630                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2631                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2632             }
2633             /*
2634              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2635              * RES0 if the FPU is not present, and is stored in the S bank
2636              */
2637             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2638                 extract32(env->v7m.nsacr, 10, 1)) {
2639                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2640                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2641             }
2642             return;
2643         case 0x98: /* SP_NS */
2644         {
2645             /*
2646              * This gives the non-secure SP selected based on whether we're
2647              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2648              */
2649             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2650             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2651             uint32_t limit;
2652 
2653             if (!env->v7m.secure) {
2654                 return;
2655             }
2656 
2657             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2658 
2659             val &= ~0x3;
2660 
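             /*
              * A write below the selected NS stack limit raises a
              * synchronous STKOF UsageFault.
              */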
2661             if (val < limit) {
2662                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2663             }
2664 
2665             if (is_psp) {
2666                 env->v7m.other_ss_psp = val;
2667             } else {
2668                 env->v7m.other_ss_msp = val;
2669             }
2670             return;
2671         }
2672         default:
2673             break;
2674         }
2675     }
2676 
2677     switch (reg) {
2678     case 0 ... 7: /* xPSR sub-fields */
2679         v7m_msr_xpsr(env, mask, reg, val);
2680         break;
2681     case 8: /* MSP */
2682         if (v7m_using_psp(env)) {
2683             env->v7m.other_sp = val & ~3;
2684         } else {
2685             env->regs[13] = val & ~3;
2686         }
2687         break;
2688     case 9: /* PSP */
2689         if (v7m_using_psp(env)) {
2690             env->regs[13] = val & ~3;
2691         } else {
2692             env->v7m.other_sp = val & ~3;
2693         }
2694         break;
2695     case 10: /* MSPLIM */
2696         if (!arm_feature(env, ARM_FEATURE_V8)) {
2697             goto bad_reg;
2698         }
2699         env->v7m.msplim[env->v7m.secure] = val & ~7;
2700         break;
2701     case 11: /* PSPLIM */
2702         if (!arm_feature(env, ARM_FEATURE_V8)) {
2703             goto bad_reg;
2704         }
2705         env->v7m.psplim[env->v7m.secure] = val & ~7;
2706         break;
2707     case 16: /* PRIMASK */
2708         env->v7m.primask[env->v7m.secure] = val & 1;
2709         break;
2710     case 17: /* BASEPRI */
2711         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2712             goto bad_reg;
2713         }
2714         env->v7m.basepri[env->v7m.secure] = val & 0xff;
2715         break;
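         /*
          * BASEPRI_MAX only updates BASEPRI if doing so raises priority:
          * the new value must be nonzero and either lower than the current
          * BASEPRI or BASEPRI must currently be disabled (zero).
          */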
2716     case 18: /* BASEPRI_MAX */
2717         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2718             goto bad_reg;
2719         }
2720         val &= 0xff;
2721         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2722                          || env->v7m.basepri[env->v7m.secure] == 0)) {
2723             env->v7m.basepri[env->v7m.secure] = val;
2724         }
2725         break;
2726     case 19: /* FAULTMASK */
2727         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2728             goto bad_reg;
2729         }
2730         env->v7m.faultmask[env->v7m.secure] = val & 1;
2731         break;
2732     case 20: /* CONTROL */
2733         /*
2734          * Writing to the SPSEL bit only has an effect if we are in
2735          * thread mode; other bits can be updated by any privileged code.
2736          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2737          * env->v7m.control, so we need only update the others.
2738          * For v7M, we must just ignore explicit writes to SPSEL in handler
2739          * mode; for v8M the write is permitted but will have no effect.
2740          * All these bits are writes-ignored from non-privileged code,
2741          * except for SFPA.
2742          */
2743         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2744                            !arm_v7m_is_handler_mode(env))) {
2745             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2746         }
2747         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2748             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2749             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2750         }
2751         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2752             /*
2753              * SFPA is RAZ/WI from NS or if no FPU.
2754              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2755              * Both are stored in the S bank.
2756              */
2757             if (env->v7m.secure) {
2758                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2759                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2760             }
2761             if (cur_el > 0 &&
2762                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2763                  extract32(env->v7m.nsacr, 10, 1))) {
2764                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2765                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2766             }
2767         }
2768         break;
2769     default:
2770     bad_reg:
2771         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2772                                        " register %d\n", reg);
2773         return;
2774     }
2775 }
2776 
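/*
 * A minimal illustrative sketch (not used anywhere in this file) of the
 * BASEPRI_MAX conditional-write rule implemented above: the write is only
 * accepted when it raises the effective priority, i.e. when the new value
 * is nonzero and either lowers a nonzero BASEPRI or replaces a zero
 * (disabled) one. The helper name is hypothetical.
 */
static inline bool v7m_basepri_max_accepts(uint32_t basepri, uint32_t val)
{
    val &= 0xff;
    return val != 0 && (val < basepri || basepri == 0);
}
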
uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /* Implement the TT instruction. op is bits [7:6] of the insn. */
    bool forceunpriv = op & 1;
    bool alt = op & 2;
    V8M_SAttributes sattrs = {};
    uint32_t tt_resp;
    bool r, rw, nsr, nsrw, mrvalid;
    ARMMMUIdx mmu_idx;
    uint32_t mregion;
    bool targetpriv;
    bool targetsec = env->v7m.secure;

    /*
     * Work out which security state and privilege level we're
     * interested in...
     */
    if (alt) {
        targetsec = !targetsec;
    }

    if (forceunpriv) {
        targetpriv = false;
    } else {
        targetpriv = arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
    }

    /* ...and then figure out which MMU index this is */
    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);

    /*
     * We know that the MPU and SAU don't care about the access type
     * for our purposes beyond that we don't want to claim to be
     * an insn fetch, so we arbitrarily call this a read.
     */

    /*
     * MPU region info is only available to privileged code, or when
     * inspecting the other MPU state.
     */
    if (arm_current_el(env) != 0 || alt) {
        GetPhysAddrResult res = {};
        ARMMMUFaultInfo fi = {};

        /* We can ignore the return value as prot is always set */
        pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
                          &res, &fi, &mregion);
        if (mregion == -1) {
            mrvalid = false;
            mregion = 0;
        } else {
            mrvalid = true;
        }
        r = res.f.prot & PAGE_READ;
        rw = res.f.prot & PAGE_WRITE;
    } else {
        r = false;
        rw = false;
        mrvalid = false;
        mregion = 0;
    }

    if (env->v7m.secure) {
        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targetsec, &sattrs);
        nsr = sattrs.ns && r;
        nsrw = sattrs.ns && rw;
    } else {
        /* TT from NS reports S == 0 and NSR/NSRW == 0 */
        sattrs.ns = true;
        nsr = false;
        nsrw = false;
    }

    tt_resp = (sattrs.iregion << 24) |
        (sattrs.irvalid << 23) |
        ((!sattrs.ns) << 22) |
        (nsrw << 21) |
        (nsr << 20) |
        (rw << 19) |
        (r << 18) |
        (sattrs.srvalid << 17) |
        (mrvalid << 16) |
        (sattrs.sregion << 8) |
        mregion;

    return tt_resp;
}

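/*
 * A minimal illustrative sketch of how a guest might unpack the TT
 * response assembled above; the field positions mirror the tt_resp
 * construction. These helpers are hypothetical and unused here.
 */
static inline uint32_t tt_resp_mregion(uint32_t resp)
{
    return extract32(resp, 0, 8);  /* MPU region number */
}

static inline bool tt_resp_mrvalid(uint32_t resp)
{
    return extract32(resp, 16, 1); /* MREGION field is valid */
}

static inline bool tt_resp_rw(uint32_t resp)
{
    return extract32(resp, 19, 1); /* writable at the queried privilege */
}
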
#endif /* !CONFIG_USER_ONLY */

uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
                             bool spsel)
{
    /*
     * Return a pointer to the location where we currently store the
     * stack pointer for the requested security state and thread mode.
     * This pointer will become invalid if the CPU state is updated
     * such that the stack pointers are switched around (e.g. by changing
     * the SPSEL control bit).
     * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
     * Unlike that pseudocode, we require the caller to pass us the
     * SPSEL control bit value; this is because we also use this
     * function in handling the pushing of the callee-saves registers
     * part of the v8M stack frame (pseudocode PushCalleeStack()),
     * and in the tailchain codepath the SPSEL bit comes from the exception
     * return magic LR value from the previous exception. The pseudocode
     * open-codes the stack selection in PushCalleeStack(), but we prefer
     * to make this utility function generic enough to do the job.
     */
    bool want_psp = threadmode && spsel;

    if (secure == env->v7m.secure) {
        if (want_psp == v7m_using_psp(env)) {
            return &env->regs[13];
        } else {
            return &env->v7m.other_sp;
        }
    } else {
        if (want_psp) {
            return &env->v7m.other_ss_psp;
        } else {
            return &env->v7m.other_ss_msp;
        }
    }
}

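/*
 * A minimal usage sketch (hypothetical, not called from this file):
 * fetch the Secure PSP wherever it currently lives. Thread mode with
 * SPSEL set selects the process stack. The returned pointer must be
 * consumed before anything re-banks the stack pointers, per the
 * comment in arm_v7m_get_sp_ptr() above.
 */
static inline uint32_t v7m_peek_secure_psp(CPUARMState *env)
{
    uint32_t *psp = arm_v7m_get_sp_ptr(env, true, true, true);

    return *psp;
}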