xref: /openbmc/qemu/target/arm/tcg/m_helper.c (revision fff1aaf4)
/*
 * ARM generic helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "exec/exec-all.h"
#ifdef CONFIG_TCG
#include "exec/cpu_ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
#include "hw/intc/armv7m_nvic.h"
#endif

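/*
 * Handle writes to the xPSR via MSR. 'mask' is the instruction's mask
 * field: bit 3 requests a write to the flags and Q bit (APSR_nzcvq);
 * bit 2 requests the GE bits (APSR_g), which exist only if the DSP
 * extension is implemented.
 */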
static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
                         uint32_t reg, uint32_t val)
{
    /* Only APSR is actually writable */
    if (!(reg & 4)) {
        uint32_t apsrmask = 0;

        if (mask & 8) {
            apsrmask |= XPSR_NZCV | XPSR_Q;
        }
        if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            apsrmask |= XPSR_GE;
        }
        xpsr_write(env, val, apsrmask);
    }
}

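/*
 * Read the xPSR for MRS. 'reg' is the low bits of the SYSm field:
 * bit 0 set means the IPSR value is included (it reads as zero for
 * unprivileged callers), and bit 2 set selects the views that
 * exclude the APSR fields.
 */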
static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
{
    uint32_t mask = 0;

    if ((reg & 1) && el) {
        mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
    }
    if (!(reg & 4)) {
        mask |= XPSR_NZCV | XPSR_Q; /* APSR */
        if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
            mask |= XPSR_GE;
        }
    }
    /* EPSR reads as zero */
    return xpsr_read(env) & mask;
}

uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure)
{
    uint32_t value = env->v7m.control[secure];

    if (!secure) {
        /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
        value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
    }
    return value;
}

#ifdef CONFIG_USER_ONLY

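/*
 * In user-only emulation the guest always runs NonSecure and
 * unprivileged, so these helpers implement only the EL0-visible
 * subset of the system-register behaviour.
 */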
void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
{
    uint32_t mask = extract32(maskreg, 8, 4);
    uint32_t reg = extract32(maskreg, 0, 8);

    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        v7m_msr_xpsr(env, mask, reg, val);
        break;
    case 20: /* CONTROL */
        /* There are no sub-fields that are actually writable from EL0. */
        break;
    default:
        /* Unprivileged writes to other registers are ignored */
        break;
    }
}

uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
    switch (reg) {
    case 0 ... 7: /* xPSR sub-fields */
        return v7m_mrs_xpsr(env, reg, 0);
    case 20: /* CONTROL */
        return arm_v7m_mrs_control(env, 0);
    default:
        /* Unprivileged reads of other registers return zero. */
        return 0;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    /* translate.c should never generate calls here in user-only mode */
    g_assert_not_reached();
}

uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
{
    /*
     * The TT instructions can be used by unprivileged code, but in
     * user-only emulation we don't have the MPU.
     * Luckily, since we know we are NonSecure unprivileged (and that in
     * turn means that the A flag wasn't specified), all the bits in the
     * register must be zero:
     *  IREGION: 0 because IRVALID is 0
     *  IRVALID: 0 because NS
     *  S: 0 because NS
     *  NSRW: 0 because NS
     *  NSR: 0 because NS
     *  RW: 0 because unpriv and A flag not set
     *  R: 0 because unpriv and A flag not set
     *  SRVALID: 0 because NS
     *  MRVALID: 0 because unpriv and A flag not set
     *  SREGION: 0 because SRVALID is 0
     *  MREGION: 0 because MRVALID is 0
     */
    return 0;
}

ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    return ARMMMUIdx_MUser;
}

#else /* !CONFIG_USER_ONLY */

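/*
 * Compose an M-profile MMU index from its parts: the base ARM_MMU_IDX_M
 * plus flag bits for privileged execution, "negative priority" execution
 * (e.g. HardFault/NMI handlers, which can bypass the MPU when
 * MPU_CTRL.HFNMIENA is 0), and the Secure state.
 */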
static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                                     bool secstate, bool priv, bool negpri)
{
    ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;

    if (priv) {
        mmu_idx |= ARM_MMU_IDX_M_PRIV;
    }

    if (negpri) {
        mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
    }

    if (secstate) {
        mmu_idx |= ARM_MMU_IDX_M_S;
    }

    return mmu_idx;
}

static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                       bool secstate, bool priv)
{
    bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);

    return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
}

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
    bool priv = arm_v7m_is_handler_mode(env) ||
        !(env->v7m.control[secstate] & 1);

    return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
}

/*
 * What kind of stack write are we doing? This affects how exceptions
 * generated during the stacking are treated.
 */
typedef enum StackingMode {
    STACK_NORMAL,
    STACK_IGNFAULTS,
    STACK_LAZYFP,
} StackingMode;

static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
                            ARMMMUIdx mmu_idx, StackingMode mode)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;

    if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.LSPERR "
                              "during lazy stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...SecureFault with SFSR.AUVIOL "
                              "during stacking\n");
                env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
            }
            env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            if (mode == STACK_LAZYFP) {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MLSPERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
            } else {
                qemu_log_mask(CPU_LOG_INT,
                              "...MemManageFault with CFSR.MSTKERR\n");
                env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
            }
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }
    address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
                         value, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to write the data */
        if (mode == STACK_LAZYFP) {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
        } else {
            qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
            env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
        }
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     * The only case when we must not pend the exception but instead
     * throw it away is if we are doing the push of the callee registers
     * and we've already generated a derived exception (this is indicated
     * by the caller passing STACK_IGNFAULTS). Even in this case we will
     * still update the fault status registers.
     */
    switch (mode) {
    case STACK_NORMAL:
        armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
        break;
    case STACK_LAZYFP:
        armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
        break;
    case STACK_IGNFAULTS:
        break;
    }
    return false;
}

static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
                           ARMMMUIdx mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult txres;
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};
    bool secure = mmu_idx & ARM_MMU_IDX_M_S;
    int exc;
    bool exc_secure;
    uint32_t value;

    if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
        /* MPU/SAU lookup failed */
        if (fi.type == ARMFault_QEMU_SFault) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault with SFSR.AUVIOL during unstack\n");
            env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
            env->v7m.sfar = addr;
            exc = ARMV7M_EXCP_SECURE;
            exc_secure = false;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "...MemManageFault with CFSR.MUNSTKERR\n");
            env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
            exc = ARMV7M_EXCP_MEM;
            exc_secure = secure;
        }
        goto pend_fault;
    }

    value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
                              res.f.phys_addr, res.f.attrs, &txres);
    if (txres != MEMTX_OK) {
        /* BusFault trying to read the data */
        qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
        env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
        exc = ARMV7M_EXCP_BUS;
        exc_secure = false;
        goto pend_fault;
    }

    *dest = value;
    return true;

pend_fault:
    /*
     * By pending the exception at this point we are making
     * the IMPDEF choice "overridden exceptions pended" (see the
     * MergeExcInfo() pseudocode). The other choice would be to not
     * pend them now and then make a choice about which to throw away
     * later if we have two derived exceptions.
     */
    armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
    return false;
}

void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
{
    /*
     * Preserve FP state (because LSPACT was set and we are about
     * to execute an FP instruction). This corresponds to the
     * PreserveFPState() pseudocode.
     * We may throw an exception if the stacking fails.
     */
    ARMCPU *cpu = env_archcpu(env);
    bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
    bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
    bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
    uint32_t fpcar = env->v7m.fpcar[is_secure];
    bool stacked_ok = true;
    bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
    bool take_exception;

    /* Take the iothread lock as we are going to touch the NVIC */
    qemu_mutex_lock_iothread();

    /* Check the background context had access to the FPU */
    if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
        env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
        armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
        env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        stacked_ok = false;
    }

    if (!splimviol && stacked_ok) {
        /* We only stack if the stack limit wasn't violated */
        int i;
        ARMMMUIdx mmu_idx;

        mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
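        /*
         * FP frame layout: s0..s15 at FPCAR+0x0..+0x3c, FPSCR at +0x40,
         * VPR at +0x44 if MVE is present, then s16..s31 at +0x48..+0x84
         * when TS requires the extended frame.
         */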
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fpcar + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR/VPR */
            }
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
                v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
        }

        stacked_ok = stacked_ok &&
            v7m_stack_write(cpu, fpcar + 0x40,
                            vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            stacked_ok = stacked_ok &&
                v7m_stack_write(cpu, fpcar + 0x44,
                                env->v7m.vpr, mmu_idx, STACK_LAZYFP);
        }
    }

    /*
     * We definitely pended an exception, but it's possible that it
     * might not be able to be taken now. If its priority permits us
     * to take it now, then we must not update the LSPACT or FP regs,
     * but instead jump out to take the exception immediately.
     * If it's just pending and won't be taken until the current
     * handler exits, then we do update LSPACT and the FP regs.
     */
    take_exception = !stacked_ok &&
        armv7m_nvic_can_take_pending_exception(env->nvic);

    qemu_mutex_unlock_iothread();

    if (take_exception) {
        raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
    }

    env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;

    if (ts) {
        /* Clear s0 to s31 and the FPSCR and VPR */
        int i;

        for (i = 0; i < 32; i += 2) {
            *aa32_vfp_dreg(env, i / 2) = 0;
        }
        vfp_set_fpscr(env, 0);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = 0;
        }
    }
    /*
     * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
     * unchanged.
     */
}

/*
 * Write to v7M CONTROL.SPSEL bit for the specified security bank.
 * This may change the current stack pointer between Main and Process
 * stack pointers if it is done for the CONTROL register for the current
 * security state.
 */
static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
                                                 bool new_spsel,
                                                 bool secstate)
{
    bool old_is_psp = v7m_using_psp(env);

    env->v7m.control[secstate] =
        deposit32(env->v7m.control[secstate],
                  R_V7M_CONTROL_SPSEL_SHIFT,
                  R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);

    if (secstate == env->v7m.secure) {
        bool new_is_psp = v7m_using_psp(env);
        uint32_t tmp;

        if (old_is_psp != new_is_psp) {
            tmp = env->v7m.other_sp;
            env->v7m.other_sp = env->regs[13];
            env->regs[13] = tmp;
        }
    }
}

/*
 * Write to v7M CONTROL.SPSEL bit. This may change the current
 * stack pointer between Main and Process stack pointers.
 */
static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
    write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
}

void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
{
    /*
     * Write a new value to v7m.exception, thus transitioning into or out
     * of Handler mode; this may result in a change of active stack pointer.
     */
    bool new_is_psp, old_is_psp = v7m_using_psp(env);
    uint32_t tmp;

    env->v7m.exception = new_exc;

    new_is_psp = v7m_using_psp(env);

    if (old_is_psp != new_is_psp) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
    }
}

/* Switch M profile security state between NS and S */
static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
{
    uint32_t new_ss_msp, new_ss_psp;

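    /*
     * Four stack pointers are live at once (MSP and PSP, each banked by
     * security state): env->regs[13] is the active SP, v7m.other_sp is
     * the inactive SP of the current security state, and other_ss_msp /
     * other_ss_psp hold both SPs of the inactive security state.
     */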
    if (env->v7m.secure == new_secstate) {
        return;
    }

    /*
     * All the banked state is accessed by looking at env->v7m.secure
     * except for the stack pointer; rearrange the SP appropriately.
     */
    new_ss_msp = env->v7m.other_ss_msp;
    new_ss_psp = env->v7m.other_ss_psp;

    if (v7m_using_psp(env)) {
        env->v7m.other_ss_psp = env->regs[13];
        env->v7m.other_ss_msp = env->v7m.other_sp;
    } else {
        env->v7m.other_ss_msp = env->regs[13];
        env->v7m.other_ss_psp = env->v7m.other_sp;
    }

    env->v7m.secure = new_secstate;

    if (v7m_using_psp(env)) {
        env->regs[13] = new_ss_psp;
        env->v7m.other_sp = new_ss_msp;
    } else {
        env->regs[13] = new_ss_msp;
        env->v7m.other_sp = new_ss_psp;
    }
}

void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BXNS:
     *  - if the return value is a magic value, do exception return (like BX)
     *  - otherwise bit 0 of the return value is the target security state
     */
    uint32_t min_magic;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    if (dest >= min_magic) {
        /*
         * This is an exception return magic value; put it where
         * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
         * Note that if we ever add gen_ss_advance() singlestep support to
         * M profile this should count as an "instruction execution complete"
         * event (compare gen_bx_excret_final_code()).
         */
        env->regs[15] = dest & ~1;
        env->thumb = dest & 1;
        HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
        /* notreached */
    }

    /* translate.c should have made BXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (!(dest & 1)) {
        env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    }
    switch_v7m_security_state(env, dest & 1);
    env->thumb = true;
    env->regs[15] = dest & ~1;
    arm_rebuild_hflags(env);
}

void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
{
    /*
     * Handle v7M BLXNS:
     *  - bit 0 of the destination address is the target security state
     */

    /* At this point regs[15] is the address just after the BLXNS */
    uint32_t nextinst = env->regs[15] | 1;
    uint32_t sp = env->regs[13] - 8;
    uint32_t saved_psr;

    /* translate.c will have made BLXNS UNDEF unless we're secure */
    assert(env->v7m.secure);

    if (dest & 1) {
        /*
         * Target is Secure, so this is just a normal BLX,
         * except that the low bit doesn't indicate Thumb/not.
         */
        env->regs[14] = nextinst;
        env->thumb = true;
        env->regs[15] = dest & ~1;
        return;
    }

    /* Target is non-secure: first push a stack frame */
    if (!QEMU_IS_ALIGNED(sp, 8)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "BLXNS with misaligned SP is UNPREDICTABLE\n");
    }

    if (sp < v7m_sp_limit(env)) {
        raise_exception(env, EXCP_STKOF, 0, 1);
    }

    saved_psr = env->v7m.exception;
    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
        saved_psr |= XPSR_SFPA;
    }

    /* Note that these stores can throw exceptions on MPU faults */
    cpu_stl_data_ra(env, sp, nextinst, GETPC());
    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());

    env->regs[13] = sp;
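    /* LR := FNC_RETURN (0xfeffffff, bit 0 set), the function-return magic */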
    env->regs[14] = 0xfeffffff;
    if (arm_v7m_is_handler_mode(env)) {
        /*
         * Write a dummy value to IPSR, to avoid leaking the current secure
         * exception number to non-secure code. This is guaranteed not
         * to cause write_v7m_exception() to actually change stacks.
         */
        write_v7m_exception(env, 1);
    }
    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
    switch_v7m_security_state(env, 0);
    env->thumb = true;
    env->regs[15] = dest;
    arm_rebuild_hflags(env);
}

static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
                                uint32_t *pvec)
{
    CPUState *cs = CPU(cpu);
    CPUARMState *env = &cpu->env;
    MemTxResult result;
    uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
    uint32_t vector_entry;
    MemTxAttrs attrs = {};
    ARMMMUIdx mmu_idx;
    bool exc_secure;

    qemu_log_mask(CPU_LOG_INT,
                  "...loading from element %d of %s vector table at 0x%x\n",
                  exc, targets_secure ? "secure" : "non-secure", addr);

    mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);

    /*
     * We don't do a get_phys_addr() here because the rules for vector
     * loads are special: they always use the default memory map, and
     * the default memory map permits reads from all addresses.
     * Since there's no easy way to pass through to pmsav8_mpu_lookup()
     * that we want this special case which would always say "yes",
     * we just do the SAU lookup here followed by a direct physical load.
     */
    attrs.secure = targets_secure;
    attrs.user = false;

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        V8M_SAttributes sattrs = {};

        v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
                            targets_secure, &sattrs);
        if (sattrs.ns) {
            attrs.secure = false;
        } else if (!targets_secure) {
            /*
             * NS access to S memory: the underlying exception which we escalate
             * to HardFault is SecureFault, which always targets Secure.
             */
            exc_secure = true;
            goto load_fail;
        }
    }

    vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
                                     attrs, &result);
    if (result != MEMTX_OK) {
        /*
         * Underlying exception is BusFault: its target security state
         * depends on BFHFNMINS.
         */
        exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
        goto load_fail;
    }
    *pvec = vector_entry;
    qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
    return true;

load_fail:
    /*
     * All vector table fetch fails are reported as HardFault, with
     * HFSR.VECTTBL and .FORCED set. (FORCED is set because
     * technically the underlying exception is a SecureFault or BusFault
     * that is escalated to HardFault.) This is a terminal exception,
     * so we will either take the HardFault immediately or else enter
     * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
     * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
     * secure); otherwise it targets the same security state as the
     * underlying exception.
     * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
     */
    if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
        exc_secure = true;
    }
    env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
    if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
        env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
    return false;
}

static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
{
    /*
     * Return the integrity signature value for the callee-saves
     * stack frame section. @lr is the exception return payload/LR value
     * whose FType bit forms bit 0 of the signature if FP is present.
     */
    uint32_t sig = 0xfefa125a;

    if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
        || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
        sig |= 1;
    }
    return sig;
}

static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                  bool ignore_faults)
{
    /*
     * For v8M, push the callee-saves register part of the stack frame.
     * Compare the v8M pseudocode PushCalleeStack().
     * In the tailchaining case this may not be the current stack.
     */
    CPUARMState *env = &cpu->env;
    uint32_t *frame_sp_p;
    uint32_t frameptr;
    ARMMMUIdx mmu_idx;
    bool stacked_ok;
    uint32_t limit;
    bool want_psp;
    uint32_t sig;
    StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;

    if (dotailchain) {
        bool mode = lr & R_V7M_EXCRET_MODE_MASK;
        bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
            !mode;

        mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
        frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S, mode,
                                        lr & R_V7M_EXCRET_SPSEL_MASK);
        want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
        if (want_psp) {
            limit = env->v7m.psplim[M_REG_S];
        } else {
            limit = env->v7m.msplim[M_REG_S];
        }
    } else {
        mmu_idx = arm_mmu_idx(env);
        frame_sp_p = &env->regs[13];
        limit = v7m_sp_limit(env);
    }

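    /*
     * The callee-saves frame occupies 0x28 bytes: the integrity
     * signature at +0x0, a reserved word at +0x4 (not written here),
     * then r4..r11 at +0x8..+0x24.
     */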
    frameptr = *frame_sp_p - 0x28;
    if (frameptr < limit) {
        /*
         * Stack limit failure: set SP to the limit value, and generate
         * STKOF UsageFault. Stack pushes below the limit must not be
         * performed. It is IMPDEF whether pushes above the limit are
         * performed; we choose not to.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...STKOF during callee-saves register stacking\n");
        env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
        armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                env->v7m.secure);
        *frame_sp_p = limit;
        return true;
    }

    /*
     * Write as much of the stack frame as we can. A write failure may
     * cause us to pend a derived exception.
     */
    sig = v7m_integrity_sig(env, lr);
    stacked_ok =
        v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
        v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);

    /* Update SP regardless of whether any of the stack accesses failed. */
    *frame_sp_p = frameptr;

    return !stacked_ok;
}

static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
                                bool ignore_stackfaults)
{
    /*
     * Do the "take the exception" parts of exception entry,
     * but not the pushing of state to the stack. This is
     * similar to the pseudocode ExceptionTaken() function.
     */
    CPUARMState *env = &cpu->env;
    uint32_t addr;
    bool targets_secure;
    int exc;
    bool push_failed = false;

    armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
    qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
                  targets_secure ? "secure" : "nonsecure", exc);

    if (dotailchain) {
        /* Sanitize LR FType and PREFIX bits */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            lr |= R_V7M_EXCRET_FTYPE_MASK;
        }
        lr = deposit32(lr, 24, 8, 0xff);
    }

    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            (lr & R_V7M_EXCRET_S_MASK)) {
            /*
             * The background code (the owner of the registers in the
             * exception frame) is Secure. This means it may either already
             * have or now needs to push callee-saves registers.
             */
            if (targets_secure) {
                if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
                    /*
                     * We took an exception from Secure to NonSecure
                     * (which means the callee-saved registers got stacked)
                     * and are now tailchaining to a Secure exception.
                     * Clear DCRS so eventual return from this Secure
                     * exception unstacks the callee-saved registers.
                     */
                    lr &= ~R_V7M_EXCRET_DCRS_MASK;
                }
            } else {
                /*
                 * We're going to a non-secure exception; push the
                 * callee-saves registers to the stack now, if they're
                 * not already saved.
                 */
                if (lr & R_V7M_EXCRET_DCRS_MASK &&
                    !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
                    push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
                                                        ignore_stackfaults);
                }
                lr |= R_V7M_EXCRET_DCRS_MASK;
            }
        }

        lr &= ~R_V7M_EXCRET_ES_MASK;
        if (targets_secure) {
            lr |= R_V7M_EXCRET_ES_MASK;
        }
        lr &= ~R_V7M_EXCRET_SPSEL_MASK;
        if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
            lr |= R_V7M_EXCRET_SPSEL_MASK;
        }

        /*
         * Clear registers if necessary to prevent non-secure exception
         * code being able to see register values from secure code.
         * Where register values become architecturally UNKNOWN we leave
         * them with their previous values. v8.1M is tighter than v8.0M
         * here and always zeroes the caller-saved registers regardless
         * of the security state the exception is targeting.
         */
        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
                /*
                 * Always clear the caller-saved registers (they have been
                 * pushed to the stack earlier in v7m_push_stack()).
                 * Clear callee-saved registers if the background code is
                 * Secure (in which case these regs were saved in
                 * v7m_push_callee_stack()).
                 */
                int i;
                /*
                 * r4..r11 are callee-saves, zero only if background
                 * state was Secure (EXCRET.S == 1) and exception
                 * targets Non-secure state
                 */
                bool zero_callee_saves = !targets_secure &&
                    (lr & R_V7M_EXCRET_S_MASK);

                for (i = 0; i < 13; i++) {
                    if (i < 4 || i > 11 || zero_callee_saves) {
                        env->regs[i] = 0;
                    }
                }
                /* Clear EAPSR */
                xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
            }
        }
    }

    if (push_failed && !ignore_stackfaults) {
        /*
         * Derived exception on callee-saves register stacking:
         * we might now want to take a different exception which
         * targets a different security state, so try again from the top.
         */
        qemu_log_mask(CPU_LOG_INT,
                      "...derived exception on callee-saves register stacking\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
        /* Vector load failed: derived exception */
        qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
        v7m_exception_taken(cpu, lr, true, true);
        return;
    }

    /*
     * Now we've done everything that might cause a derived exception
     * we can go ahead and activate whichever exception we're going to
     * take (which might now be the derived exception).
     */
    armv7m_nvic_acknowledge_irq(env->nvic);

    /* Switch to target security state -- must do this before writing SPSEL */
    switch_v7m_security_state(env, targets_secure);
    write_v7m_control_spsel(env, 0);
    arm_clear_exclusive(env);
    /* Clear SFPA and FPCA (has no effect if no FPU) */
    env->v7m.control[M_REG_S] &=
        ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
    /* Clear IT bits */
    env->condexec_bits = 0;
    env->regs[14] = lr;
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
    arm_rebuild_hflags(env);
}

static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
                             bool apply_splim)
{
    /*
     * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
     * that we will need later in order to do lazy FP reg stacking.
     */
    bool is_secure = env->v7m.secure;
    NVICState *nvic = env->nvic;
    /*
     * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
     * are banked and we want to update the bit in the bank for the
     * current security state; and in one case we want to specifically
     * update the NS banked version of a bit even if we are secure.
     */
    uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
    uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
    uint32_t *fpccr = &env->v7m.fpccr[is_secure];
    bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;

    env->v7m.fpcar[is_secure] = frameptr & ~0x7;

    if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
        bool splimviol;
        uint32_t splim = v7m_sp_limit(env);
        bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
            (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);

        splimviol = !ign && frameptr < splim;
        *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
    }

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);

    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);

    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
                        !arm_v7m_is_handler_mode(env));

    hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);

    bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);

    mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
    *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);

    ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
    *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);

    monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
    *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);

        sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
        *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
    }
}

void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
{
    /* fptr is the value of Rn, the frame pointer we store the FP regs to */
    ARMCPU *cpu = env_archcpu(env);
    bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
    bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
    uintptr_t ra = GETPC();

    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (lspact) {
        /* LSPACT should not be active when there is active FP state */
        raise_exception_ra(env, EXCP_LSERR, 0, 1, GETPC());
    }

    if (fptr & 7) {
        raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
    }

    /*
     * Note that we do not use v7m_stack_write() here, because the
     * accesses should not set the FSR bits for stacking errors if they
     * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
     * and longjmp out.
     */
    if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;

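        /* Same frame layout as lazy stacking: FPSCR at +0x40, VPR at +0x44 */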
        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint64_t dn = *aa32_vfp_dreg(env, i / 2);
            uint32_t faddr = fptr + 4 * i;
            uint32_t slo = extract64(dn, 0, 32);
            uint32_t shi = extract64(dn, 32, 32);

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR */
            }
            cpu_stl_data_ra(env, faddr, slo, ra);
            cpu_stl_data_ra(env, faddr + 4, shi, ra);
        }
        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
        }

        /*
         * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
         * leave them unchanged, matching our choice in v7m_preserve_fp_state.
         */
        if (ts) {
            for (i = 0; i < 32; i += 2) {
                *aa32_vfp_dreg(env, i / 2) = 0;
            }
            vfp_set_fpscr(env, 0);
            if (cpu_isar_feature(aa32_mve, cpu)) {
                env->v7m.vpr = 0;
            }
        }
    } else {
        v7m_update_fpccr(env, fptr, false);
    }

    env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
}

void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
    ARMCPU *cpu = env_archcpu(env);
    uintptr_t ra = GETPC();

    /* fptr is the value of Rn, the frame pointer we load the FP regs from */
    assert(env->v7m.secure);

    if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        return;
    }

    /* Check access to the coprocessor is permitted */
    if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
        raise_exception_ra(env, EXCP_NOCP, 0, 1, GETPC());
    }

    if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
        /* State in FP is still valid */
        env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
    } else {
        bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
        int i;
        uint32_t fpscr;

        if (fptr & 7) {
            raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, GETPC());
        }

        for (i = 0; i < (ts ? 32 : 16); i += 2) {
            uint32_t slo, shi;
            uint64_t dn;
            uint32_t faddr = fptr + 4 * i;

            if (i >= 16) {
                faddr += 8; /* skip the slot for the FPSCR and VPR */
            }

            slo = cpu_ldl_data_ra(env, faddr, ra);
            shi = cpu_ldl_data_ra(env, faddr + 4, ra);

            dn = (uint64_t) shi << 32 | slo;
            *aa32_vfp_dreg(env, i / 2) = dn;
        }
        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
        vfp_set_fpscr(env, fpscr);
        if (cpu_isar_feature(aa32_mve, cpu)) {
            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
        }
    }

    env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
}

static bool v7m_push_stack(ARMCPU *cpu)
{
    /*
     * Do the "set up stack frame" part of exception entry,
     * similar to pseudocode PushStack().
     * Return true if we generate a derived exception (and so
     * should ignore further stack faults trying to process
     * that derived exception.)
     */
    bool stacked_ok = true, limitviol = false;
    CPUARMState *env = &cpu->env;
    uint32_t xpsr = xpsr_read(env);
    uint32_t frameptr = env->regs[13];
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    uint32_t framesize;
    bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);

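    /*
     * Frame sizes: 0x20 for the basic 8-word frame; 0x68 when s0..s15,
     * FPSCR and the VPR slot are added; 0xa8 when TS also requires
     * s16..s31 to be stacked.
     */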
    if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
        (env->v7m.secure || nsacr_cp10)) {
        if (env->v7m.secure &&
            env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
            framesize = 0xa8;
        } else {
            framesize = 0x68;
        }
    } else {
        framesize = 0x20;
    }

    /* Align stack pointer if the guest wants that */
    if ((frameptr & 4) &&
        (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
        frameptr -= 4;
        xpsr |= XPSR_SPREALIGN;
    }

    xpsr &= ~XPSR_SFPA;
    if (env->v7m.secure &&
        (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
        xpsr |= XPSR_SFPA;
    }

    frameptr -= framesize;

    if (arm_feature(env, ARM_FEATURE_V8)) {
        uint32_t limit = v7m_sp_limit(env);

        if (frameptr < limit) {
            /*
             * Stack limit failure: set SP to the limit value, and generate
             * STKOF UsageFault. Stack pushes below the limit must not be
             * performed. It is IMPDEF whether pushes above the limit are
             * performed; we choose not to.
             */
            qemu_log_mask(CPU_LOG_INT,
                          "...STKOF during stacking\n");
            env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                    env->v7m.secure);
            env->regs[13] = limit;
            /*
             * We won't try to perform any further memory accesses but
             * we must continue through the following code to check for
             * permission faults during FPU state preservation, and we
             * must update FPCCR if lazy stacking is enabled.
             */
            limitviol = true;
            stacked_ok = false;
        }
    }

    /*
     * Write as much of the stack frame as we can. If we fail a stack
     * write this will result in a derived exception being pended
     * (which may be taken in preference to the one we started with
     * if it has higher priority).
     */
    stacked_ok = stacked_ok &&
        v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 4, env->regs[1],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 8, env->regs[2],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 12, env->regs[3],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 16, env->regs[12],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 20, env->regs[14],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 24, env->regs[15],
                        mmu_idx, STACK_NORMAL) &&
        v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);

    if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
        /* FPU is active, try to save its registers */
        bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
        bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;

        if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            qemu_log_mask(CPU_LOG_INT,
                          "...SecureFault because LSPACT and FPCA both set\n");
            env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
        } else if (!env->v7m.secure && !nsacr_cp10) {
            qemu_log_mask(CPU_LOG_INT,
                          "...Secure UsageFault with CFSR.NOCP because "
                          "NSACR.CP10 prevents stacking FP regs\n");
            armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
            env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
        } else {
            if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
                /* Lazy stacking disabled, save registers now */
                int i;
                bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
                                                 arm_current_el(env) != 0);

                if (stacked_ok && !cpacr_pass) {
                    /*
                     * Take UsageFault if CPACR forbids access. The pseudocode
                     * here does a full CheckCPEnabled() but we know the NSACR
                     * check can never fail as we have already handled that.
                     */
                    qemu_log_mask(CPU_LOG_INT,
                                  "...UsageFault with CFSR.NOCP because "
                                  "CPACR.CP10 prevents stacking FP regs\n");
                    armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
                                            env->v7m.secure);
                    env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
                    stacked_ok = false;
                }

                for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                    uint64_t dn = *aa32_vfp_dreg(env, i / 2);
                    uint32_t faddr = frameptr + 0x20 + 4 * i;
                    uint32_t slo = extract64(dn, 0, 32);
                    uint32_t shi = extract64(dn, 32, 32);

                    if (i >= 16) {
                        faddr += 8; /* skip the slot for the FPSCR and VPR */
                    }
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, faddr, slo,
                                        mmu_idx, STACK_NORMAL) &&
                        v7m_stack_write(cpu, faddr + 4, shi,
                                        mmu_idx, STACK_NORMAL);
                }
                stacked_ok = stacked_ok &&
                    v7m_stack_write(cpu, frameptr + 0x60,
                                    vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
                if (cpu_isar_feature(aa32_mve, cpu)) {
                    stacked_ok = stacked_ok &&
                        v7m_stack_write(cpu, frameptr + 0x64,
                                        env->v7m.vpr, mmu_idx, STACK_NORMAL);
                }
                if (cpacr_pass) {
                    for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
                        *aa32_vfp_dreg(env, i / 2) = 0;
                    }
                    vfp_set_fpscr(env, 0);
                    if (cpu_isar_feature(aa32_mve, cpu)) {
                        env->v7m.vpr = 0;
                    }
                }
            } else {
                /* Lazy stacking enabled, save necessary info to stack later */
                v7m_update_fpccr(env, frameptr + 0x20, true);
            }
        }
    }

    /*
     * If we broke a stack limit then SP was already updated earlier;
     * otherwise we update SP regardless of whether any of the stack
     * accesses failed or we took some other kind of fault.
     */
    if (!limitviol) {
        env->regs[13] = frameptr;
    }

    return !stacked_ok;
}

static void do_v7m_exception_exit(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    uint32_t excret;
    uint32_t xpsr, xpsr_mask;
    bool ufault = false;
    bool sfault = false;
    bool return_to_sp_process;
    bool return_to_handler;
    bool rettobase = false;
    bool exc_secure = false;
    bool return_to_secure;
    bool ftype;
    bool restore_s16_s31 = false;

    /*
     * If we're not in Handler mode then jumps to magic exception-exit
     * addresses don't have magic behaviour. However for the v8M
     * security extensions the magic secure-function-return has to
     * work in thread mode too, so to avoid doing an extra check in
     * the generated code we allow exception-exit magic to also cause the
     * internal exception and bring us here in thread mode. Correct code
     * will never try to do this (the following insn fetch will always
     * fault) so the overhead of having taken an unnecessary exception
     * doesn't matter.
     */
    if (!arm_v7m_is_handler_mode(env)) {
        return;
    }

    /*
     * In the spec pseudocode ExceptionReturn() is called directly
     * from BXWritePC() and gets the full target PC value including
     * bit zero. In QEMU's implementation we treat it as a normal
     * jump-to-register (which is then caught later on), and so split
     * the target value up between env->regs[15] and env->thumb in
     * gen_bx(). Reconstitute it.
     */
    excret = env->regs[15];
    if (env->thumb) {
        excret |= 1;
    }

    qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
                  " previous exception %d\n",
                  excret, env->v7m.exception);

    if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
                      "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
                      excret);
    }

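    /*
     * EXC_RETURN.FType (bit 4): 1 means a standard (integer-only) frame
     * was stacked; 0 means an extended frame including FP state was
     * stacked.
     */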
1407     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1408 
1409     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1410         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1411                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1412                       "if FPU not present\n",
1413                       excret);
1414         ftype = true;
1415     }
1416 
1417     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1418         /*
1419          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1420          * we pick which FAULTMASK to clear.
1421          */
1422         if (!env->v7m.secure &&
1423             ((excret & R_V7M_EXCRET_ES_MASK) ||
1424              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1425             sfault = 1;
1426             /* For all other purposes, treat ES as 0 (R_HXSR) */
1427             excret &= ~R_V7M_EXCRET_ES_MASK;
1428         }
1429         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1430     }
1431 
1432     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1433         /*
1434          * Auto-clear FAULTMASK on return from other than NMI.
1435          * If the security extension is implemented then this only
1436          * happens if the raw execution priority is >= 0; the
1437          * value of the ES bit in the exception return value indicates
1438          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1439          */
1440         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1441             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1442                 env->v7m.faultmask[exc_secure] = 0;
1443             }
1444         } else {
1445             env->v7m.faultmask[M_REG_NS] = 0;
1446         }
1447     }
1448 
1449     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1450                                      exc_secure)) {
1451     case -1:
1452         /* attempt to exit an exception that isn't active */
1453         ufault = true;
1454         break;
1455     case 0:
1456         /* still an irq active now */
1457         break;
1458     case 1:
1459         /*
1460          * We returned to base exception level, no nesting.
1461          * (In the pseudocode this is written using "NestedActivation != 1"
1462          * where we have 'rettobase == false'.)
1463          */
1464         rettobase = true;
1465         break;
1466     default:
1467         g_assert_not_reached();
1468     }
1469 
1470     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1471     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1472     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1473         (excret & R_V7M_EXCRET_S_MASK);
1474 
1475     if (arm_feature(env, ARM_FEATURE_V8)) {
1476         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1477             /*
1478              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1479              * we choose to take the UsageFault.
1480              */
1481             if ((excret & R_V7M_EXCRET_S_MASK) ||
1482                 (excret & R_V7M_EXCRET_ES_MASK) ||
1483                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1484                 ufault = true;
1485             }
1486         }
1487         if (excret & R_V7M_EXCRET_RES0_MASK) {
1488             ufault = true;
1489         }
1490     } else {
1491         /* For v7M we only recognize certain combinations of the low bits */
1492         switch (excret & 0xf) {
1493         case 1: /* Return to Handler */
1494             break;
1495         case 13: /* Return to Thread using Process stack */
1496         case 9: /* Return to Thread using Main stack */
1497             /*
1498              * We only need to check NONBASETHRDENA for v7M, because in
1499              * v8M this bit does not exist (it is RES1).
1500              */
1501             if (!rettobase &&
1502                 !(env->v7m.ccr[env->v7m.secure] &
1503                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1504                 ufault = true;
1505             }
1506             break;
1507         default:
1508             ufault = true;
1509         }
1510     }
1511 
1512     /*
1513      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1514      * Handler mode (and will be until we write the new XPSR.Interrupt
1515      * field) this does not switch around the current stack pointer.
1516      * We must do this before we do any kind of tailchaining, including
1517      * for the derived exceptions on integrity check failures, or we will
1518      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1519      */
1520     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1521 
1522     /*
1523      * Clear scratch FP values left in caller-saved registers; this
1524      * must happen before any kind of tail chaining.
1525      */
1526     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1527         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1528         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1529             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1530             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1531             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1532                           "stackframe: error during lazy state deactivation\n");
1533             v7m_exception_taken(cpu, excret, true, false);
1534             return;
1535         } else {
1536             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1537                 /* v8.1M adds this NOCP check */
1538                 bool nsacr_pass = exc_secure ||
1539                     extract32(env->v7m.nsacr, 10, 1);
1540                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1541                 if (!nsacr_pass) {
1542                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1543                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1544                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1545                         "stackframe: NSACR prevents clearing FPU registers\n");
1546                     v7m_exception_taken(cpu, excret, true, false);
1547                     return;
1548                 } else if (!cpacr_pass) {
1549                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1550                                             exc_secure);
1551                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1552                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1553                         "stackframe: CPACR prevents clearing FPU registers\n");
1554                     v7m_exception_taken(cpu, excret, true, false);
1555                     return;
1556                 }
1557             }
1558             /* Clear s0..s15, FPSCR and VPR */
1559             int i;
1560 
1561             for (i = 0; i < 16; i += 2) {
1562                 *aa32_vfp_dreg(env, i / 2) = 0;
1563             }
1564             vfp_set_fpscr(env, 0);
1565             if (cpu_isar_feature(aa32_mve, cpu)) {
1566                 env->v7m.vpr = 0;
1567             }
1568         }
1569     }
1570 
1571     if (sfault) {
1572         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1573         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1574         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1575                       "stackframe: failed EXC_RETURN.ES validity check\n");
1576         v7m_exception_taken(cpu, excret, true, false);
1577         return;
1578     }
1579 
1580     if (ufault) {
1581         /*
1582          * Bad exception return: instead of popping the exception
1583          * stack, directly take a usage fault on the current stack.
1584          */
1585         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1586         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1587         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1588                       "stackframe: failed exception return integrity check\n");
1589         v7m_exception_taken(cpu, excret, true, false);
1590         return;
1591     }
1592 
1593     /*
1594      * Tailchaining: if there is currently a pending exception that
1595      * is high enough priority to preempt execution at the level we're
1596      * about to return to, then just directly take that exception now,
1597      * avoiding an unstack-and-then-stack. Note that now we have
1598      * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
1599      * our current execution priority is already the execution priority we are
1600      * returning to -- none of the state we would unstack or set based on
1601      * the EXCRET value affects it.
1602      */
1603     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1604         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1605         v7m_exception_taken(cpu, excret, true, false);
1606         return;
1607     }
1608 
1609     switch_v7m_security_state(env, return_to_secure);
1610 
1611     {
1612         /*
1613          * The stack pointer we should be reading the exception frame from
1614          * depends on bits in the magic exception return type value (and
1615          * for v8M isn't necessarily the stack pointer we will eventually
1616          * end up resuming execution with). Get a pointer to the location
1617          * in the CPU state struct where the SP we need is currently being
1618          * stored; we will use and modify it in place.
1619          * We use this limited C variable scope so we don't accidentally
1620          * use 'frame_sp_p' after we do something that makes it invalid.
1621          */
1622         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1623         uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, return_to_secure,
1624                                                   !return_to_handler, spsel);
1625         uint32_t frameptr = *frame_sp_p;
1626         bool pop_ok = true;
1627         ARMMMUIdx mmu_idx;
1628         bool return_to_priv = return_to_handler ||
1629             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1630 
1631         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1632                                                         return_to_priv);
1633 
1634         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1635             arm_feature(env, ARM_FEATURE_V8)) {
1636             qemu_log_mask(LOG_GUEST_ERROR,
1637                           "M profile exception return with non-8-aligned SP "
1638                           "for destination state is UNPREDICTABLE\n");
1639         }
1640 
1641         /* Do we need to pop callee-saved registers? */
1642         if (return_to_secure &&
1643             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1644              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1645             uint32_t actual_sig;
1646 
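                 /*
                  * The v8M callee-saves extension frame unstacked here is:
                  * [0x00] integrity signature, [0x04] reserved,
                  * [0x08..0x24] r4-r11 (0x28 bytes in total).
                  */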
1647             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1648 
1649             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1650                 /* Take a SecureFault on the current stack */
1651                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1652                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1653                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1654                               "stackframe: failed exception return integrity "
1655                               "signature check\n");
1656                 v7m_exception_taken(cpu, excret, true, false);
1657                 return;
1658             }
1659 
1660             pop_ok = pop_ok &&
1661                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1662                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1663                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1664                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1665                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1666                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1667                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1668                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1669 
1670             frameptr += 0x28;
1671         }
1672 
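             /*
              * The hardware-saved basic frame is eight words:
              * [0x00] r0, [0x04] r1, [0x08] r2, [0x0c] r3, [0x10] r12,
              * [0x14] lr, [0x18] return address, [0x1c] xPSR.
              */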
1673         /* Pop registers */
1674         pop_ok = pop_ok &&
1675             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1676             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1677             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1678             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1679             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1680             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1681             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1682             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
1683 
1684         if (!pop_ok) {
1685             /*
1686              * v7m_stack_read() pended a fault, so take it (as a tail
1687              * chained exception on the same stack frame)
1688              */
1689             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1690             v7m_exception_taken(cpu, excret, true, false);
1691             return;
1692         }
1693 
1694         /*
1695          * Returning from an exception with a PC with bit 0 set is defined
1696          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1697          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1698          * the lsbit, and there are several RTOSes out there which incorrectly
1699          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1700          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1701          * complain about the badly behaved guest.
1702          */
1703         if (env->regs[15] & 1) {
1704             env->regs[15] &= ~1U;
1705             if (!arm_feature(env, ARM_FEATURE_V8)) {
1706                 qemu_log_mask(LOG_GUEST_ERROR,
1707                               "M profile return from interrupt with misaligned "
1708                               "PC is UNPREDICTABLE on v7M\n");
1709             }
1710         }
1711 
1712         if (arm_feature(env, ARM_FEATURE_V8)) {
1713             /*
1714              * For v8M we have to check whether the xPSR exception field
1715              * matches the EXCRET value for return to handler/thread
1716              * before we commit to changing the SP and xPSR.
1717              */
1718             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1719             if (return_to_handler != will_be_handler) {
1720                 /*
1721                  * Take an INVPC UsageFault on the current stack.
1722                  * By this point we will have switched to the security state
1723                  * for the background state, so this UsageFault will target
1724                  * that state.
1725                  */
1726                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1727                                         env->v7m.secure);
1728                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1729                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1730                               "stackframe: failed exception return integrity "
1731                               "check\n");
1732                 v7m_exception_taken(cpu, excret, true, false);
1733                 return;
1734             }
1735         }
1736 
1737         if (!ftype) {
1738             /* FP present and we need to handle it */
1739             if (!return_to_secure &&
1740                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1741                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1742                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1743                 qemu_log_mask(CPU_LOG_INT,
1744                               "...taking SecureFault on existing stackframe: "
1745                               "Secure LSPACT set but exception return is "
1746                               "not to secure state\n");
1747                 v7m_exception_taken(cpu, excret, true, false);
1748                 return;
1749             }
1750 
1751             restore_s16_s31 = return_to_secure &&
1752                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1753 
1754             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1755                 /* State in FPU is still valid, just clear LSPACT */
1756                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1757             } else {
1758                 int i;
1759                 uint32_t fpscr;
1760                 bool cpacr_pass, nsacr_pass;
1761 
1762                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1763                                             return_to_priv);
1764                 nsacr_pass = return_to_secure ||
1765                     extract32(env->v7m.nsacr, 10, 1);
1766 
1767                 if (!cpacr_pass) {
1768                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1769                                             return_to_secure);
1770                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1771                     qemu_log_mask(CPU_LOG_INT,
1772                                   "...taking UsageFault on existing "
1773                                   "stackframe: CPACR.CP10 prevents unstacking "
1774                                   "FP regs\n");
1775                     v7m_exception_taken(cpu, excret, true, false);
1776                     return;
1777                 } else if (!nsacr_pass) {
1778                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1779                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1780                     qemu_log_mask(CPU_LOG_INT,
1781                                   "...taking Secure UsageFault on existing "
1782                                   "stackframe: NSACR.CP10 prevents unstacking "
1783                                   "FP regs\n");
1784                     v7m_exception_taken(cpu, excret, true, false);
1785                     return;
1786                 }
1787 
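                     /*
                      * FP extension frame layout relative to frameptr:
                      * [0x20..0x5c] s0-s15, [0x60] FPSCR, [0x64] VPR (if MVE),
                      * then [0x68..0xa4] s16-s31 for a full secure frame.
                      */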
1788                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1789                     uint32_t slo, shi;
1790                     uint64_t dn;
1791                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1792 
1793                     if (i >= 16) {
1794                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1795                     }
1796 
1797                     pop_ok = pop_ok &&
1798                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1799                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1800 
1801                     if (!pop_ok) {
1802                         break;
1803                     }
1804 
1805                     dn = (uint64_t)shi << 32 | slo;
1806                     *aa32_vfp_dreg(env, i / 2) = dn;
1807                 }
1808                 pop_ok = pop_ok &&
1809                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1810                 if (pop_ok) {
1811                     vfp_set_fpscr(env, fpscr);
1812                 }
1813                 if (cpu_isar_feature(aa32_mve, cpu)) {
1814                     pop_ok = pop_ok &&
1815                         v7m_stack_read(cpu, &env->v7m.vpr,
1816                                        frameptr + 0x64, mmu_idx);
1817                 }
1818                 if (!pop_ok) {
1819                     /*
1820                      * These regs are 0 if the security extension is present;
1821                      * otherwise they are merely UNKNOWN. We always zero them.
1822                      */
1823                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1824                         *aa32_vfp_dreg(env, i / 2) = 0;
1825                     }
1826                     vfp_set_fpscr(env, 0);
1827                     if (cpu_isar_feature(aa32_mve, cpu)) {
1828                         env->v7m.vpr = 0;
1829                     }
1830                 }
1831             }
1832         }
1833         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1834                                                V7M_CONTROL, FPCA, !ftype);
1835 
1836         /* Commit to consuming the stack frame */
1837         frameptr += 0x20;
1838         if (!ftype) {
1839             frameptr += 0x48;
1840             if (restore_s16_s31) {
1841                 frameptr += 0x40;
1842             }
1843         }
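             /*
              * That is, we consume 0x20 bytes for an integer-only frame,
              * 0x68 with the s0-s15/FPSCR/VPR area, or 0xa8 for a full
              * secure FP frame including s16-s31.
              */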
1844         /*
1845          * Undo stack alignment: the SPREALIGN bit indicates that the original
1846          * pre-exception SP was not 8-aligned and we added a padding word to
1847          * align it, so we undo this by ORing in the bit that increases it
1848          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1849          * would work too but a logical OR is how the pseudocode specifies it.)
1850          */
1851         if (xpsr & XPSR_SPREALIGN) {
1852             frameptr |= 4;
1853         }
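             /*
              * (For example, an original SP of 0x200000f4 was aligned down
              * to 0x200000f0 before stacking; by this point frameptr is
              * back at 0x200000f0, and the OR restores 0x200000f4.)
              */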
1854         *frame_sp_p = frameptr;
1855     }
1856 
1857     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1858     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1859         xpsr_mask &= ~XPSR_GE;
1860     }
1861     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1862     xpsr_write(env, xpsr, xpsr_mask);
1863 
1864     if (env->v7m.secure) {
1865         bool sfpa = xpsr & XPSR_SFPA;
1866 
1867         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1868                                                V7M_CONTROL, SFPA, sfpa);
1869     }
1870 
1871     /*
1872      * The restored xPSR exception field will be zero if we're
1873      * resuming in Thread mode. If that doesn't match what the
1874      * exception return excret specified then this is a UsageFault.
1875      * v7M requires we make this check here; v8M did it earlier.
1876      */
1877     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1878         /*
1879          * Take an INVPC UsageFault by pushing the stack again;
1880          * we know we're v7M so this is never a Secure UsageFault.
1881          */
1882         bool ignore_stackfaults;
1883 
1884         assert(!arm_feature(env, ARM_FEATURE_V8));
1885         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1886         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1887         ignore_stackfaults = v7m_push_stack(cpu);
1888         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1889                       "failed exception return integrity check\n");
1890         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1891         return;
1892     }
1893 
1894     /* Otherwise, we have a successful exception exit. */
1895     arm_clear_exclusive(env);
1896     arm_rebuild_hflags(env);
1897     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1898 }
1899 
1900 static bool do_v7m_function_return(ARMCPU *cpu)
1901 {
1902     /*
1903      * v8M security extensions magic function return.
1904      * We may either:
1905      *  (1) throw an exception (longjump)
1906      *  (2) return true if we successfully handled the function return
1907      *  (3) return false if we failed a consistency check and have
1908      *      pended a UsageFault that needs to be taken now
1909      *
1910      * At this point the magic return value is split between env->regs[15]
1911      * and env->thumb. We don't bother to reconstitute it because we don't
1912      * need it (all values are handled the same way).
1913      */
1914     CPUARMState *env = &cpu->env;
1915     uint32_t newpc, newpsr, newpsr_exc;
1916 
1917     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1918 
1919     {
1920         bool threadmode, spsel;
1921         MemOpIdx oi;
1922         ARMMMUIdx mmu_idx;
1923         uint32_t *frame_sp_p;
1924         uint32_t frameptr;
1925 
1926         /* Pull the return address and IPSR from the Secure stack */
1927         threadmode = !arm_v7m_is_handler_mode(env);
1928         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1929 
1930         frame_sp_p = arm_v7m_get_sp_ptr(env, true, threadmode, spsel);
1931         frameptr = *frame_sp_p;
1932 
1933         /*
1934          * These loads may throw an exception (for MPU faults). We want to
1935          * do them as secure, so work out what MMU index that is.
1936          */
1937         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1938         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1939         newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
1940         newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
1941 
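             /*
              * (These are the two words pushed by the BLXNS that originally
              * transitioned to Non-secure code: the true return address and
              * a saved PSR word carrying the caller's IPSR exception number
              * and SFPA bit.)
              */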
1942         /* Consistency checks on new IPSR */
1943         newpsr_exc = newpsr & XPSR_EXCP;
1944         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1945               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1946             /* Pend the fault and tell our caller to take it */
1947             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1948             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1949                                     env->v7m.secure);
1950             qemu_log_mask(CPU_LOG_INT,
1951                           "...taking INVPC UsageFault: "
1952                           "IPSR consistency check failed\n");
1953             return false;
1954         }
1955 
1956         *frame_sp_p = frameptr + 8;
1957     }
1958 
1959     /* This invalidates frame_sp_p */
1960     switch_v7m_security_state(env, true);
1961     env->v7m.exception = newpsr_exc;
1962     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
1963     if (newpsr & XPSR_SFPA) {
1964         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
1965     }
1966     xpsr_write(env, 0, XPSR_IT);
1967     env->thumb = newpc & 1;
1968     env->regs[15] = newpc & ~1;
1969     arm_rebuild_hflags(env);
1970 
1971     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
1972     return true;
1973 }
1974 
1975 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
1976                                uint32_t addr, uint16_t *insn)
1977 {
1978     /*
1979      * Load a 16-bit portion of a v7M instruction, returning true on success,
1980      * or false on failure (in which case we will have pended the appropriate
1981      * exception).
1982      * We need to do the instruction fetch's MPU and SAU checks
1983      * like this because there is no MMU index that would allow
1984      * doing the load with a single function call. Instead we must
1985      * first check that the security attributes permit the load
1986      * and that they don't mismatch on the two halves of the instruction,
1987      * and then we do the load as a secure load (ie using the security
1988      * attributes of the address, not the CPU, as architecturally required).
1989      */
1990     CPUState *cs = CPU(cpu);
1991     CPUARMState *env = &cpu->env;
1992     V8M_SAttributes sattrs = {};
1993     GetPhysAddrResult res = {};
1994     ARMMMUFaultInfo fi = {};
1995     MemTxResult txres;
1996 
1997     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
1998     if (!sattrs.nsc || sattrs.ns) {
1999         /*
2000          * This must be the second half of the insn, and it straddles a
2001          * region boundary with the second half not being S&NSC.
2002          */
2003         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2004         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2005         qemu_log_mask(CPU_LOG_INT,
2006                       "...really SecureFault with SFSR.INVEP\n");
2007         return false;
2008     }
2009     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
2010         /* the MPU lookup failed */
2011         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2012         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2013         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2014         return false;
2015     }
2016     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2017                                   res.f.phys_addr, res.f.attrs, &txres);
2018     if (txres != MEMTX_OK) {
2019         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2020         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2021         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2022         return false;
2023     }
2024     return true;
2025 }
2026 
2027 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2028                                    uint32_t addr, uint32_t *spdata)
2029 {
2030     /*
2031      * Read a word of data from the stack for the SG instruction,
2032      * writing the value into *spdata. If the load succeeds, return
2033      * true; otherwise pend an appropriate exception and return false.
2034      * (We can't use data load helpers here that throw an exception
2035      * because of the context we're called in, which is halfway through
2036      * arm_v7m_cpu_do_interrupt().)
2037      */
2038     CPUState *cs = CPU(cpu);
2039     CPUARMState *env = &cpu->env;
2040     MemTxResult txres;
2041     GetPhysAddrResult res = {};
2042     ARMMMUFaultInfo fi = {};
2043     uint32_t value;
2044 
2045     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
2046         /* MPU/SAU lookup failed */
2047         if (fi.type == ARMFault_QEMU_SFault) {
2048             qemu_log_mask(CPU_LOG_INT,
2049                           "...SecureFault during stack word read\n");
2050             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2051             env->v7m.sfar = addr;
2052             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2053         } else {
2054             qemu_log_mask(CPU_LOG_INT,
2055                           "...MemManageFault during stack word read\n");
2056             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2057                 R_V7M_CFSR_MMARVALID_MASK;
2058             env->v7m.mmfar[M_REG_S] = addr;
2059             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2060         }
2061         return false;
2062     }
2063     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2064                               res.f.phys_addr, res.f.attrs, &txres);
2065     if (txres != MEMTX_OK) {
2066         /* BusFault trying to read the data */
2067         qemu_log_mask(CPU_LOG_INT,
2068                       "...BusFault during stack word read\n");
2069         env->v7m.cfsr[M_REG_NS] |=
2070             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2071         env->v7m.bfar = addr;
2072         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2073         return false;
2074     }
2075 
2076     *spdata = value;
2077     return true;
2078 }
2079 
2080 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2081 {
2082     /*
2083      * Check whether this attempt to execute code in a Secure & NS-Callable
2084      * memory region is for an SG instruction; if so, then emulate the
2085      * effect of the SG instruction and return true. Otherwise pend
2086      * the correct kind of exception and return false.
2087      */
2088     CPUARMState *env = &cpu->env;
2089     ARMMMUIdx mmu_idx;
2090     uint16_t insn;
2091 
2092     /*
2093      * We should never get here unless get_phys_addr_pmsav8() caused
2094      * an exception for NS executing in S&NSC memory.
2095      */
2096     assert(!env->v7m.secure);
2097     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2098 
2099     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2100     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2101 
2102     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2103         return false;
2104     }
2105 
2106     if (!env->thumb) {
2107         goto gen_invep;
2108     }
2109 
2110     if (insn != 0xe97f) {
2111         /*
2112          * Not an SG instruction first half (we choose the IMPDEF
2113          * early-SG-check option).
2114          */
2115         goto gen_invep;
2116     }
2117 
2118     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2119         return false;
2120     }
2121 
2122     if (insn != 0xe97f) {
2123         /*
2124          * Not an SG instruction second half (yes, both halves of the SG
2125          * insn have the same hex value)
2126          */
2127         goto gen_invep;
2128     }
2129 
2130     /*
2131      * OK, we have confirmed that we really have an SG instruction.
2132      * We know we're NS in S memory so don't need to repeat those checks.
2133      */
2134     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2135                   ", executing it\n", env->regs[15]);
2136 
2137     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2138         !arm_v7m_is_handler_mode(env)) {
2139         /*
2140          * v8.1M exception stack frame integrity check. Note that we
2141          * must perform the memory access even if CCR_S.TRD is zero
2142          * and we aren't going to check what the data loaded is.
2143          */
2144         uint32_t spdata, sp;
2145 
2146         /*
2147          * We know we are currently NS, so the S stack pointers must be
2148          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2149          */
2150         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2151         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2152             /* Stack access failed and an exception has been pended */
2153             return false;
2154         }
2155 
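             /*
              * The TRD check below fails if the word at the top of the
              * Secure stack matches the exception-frame integrity signature
              * (0xfefa125a, ignoring bit [0]; cf. v7m_integrity_sig()), or
              * if Secure Thread mode would be privileged.
              */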
2156         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
2157             if (((spdata & ~1) == 0xfefa125a) ||
2158                 !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK)) {
2159                 goto gen_invep;
2160             }
2161         }
2162     }
2163 
2164     env->regs[14] &= ~1;
2165     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2166     switch_v7m_security_state(env, true);
2167     xpsr_write(env, 0, XPSR_IT);
2168     env->regs[15] += 4;
2169     arm_rebuild_hflags(env);
2170     return true;
2171 
2172 gen_invep:
2173     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2174     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2175     qemu_log_mask(CPU_LOG_INT,
2176                   "...really SecureFault with SFSR.INVEP\n");
2177     return false;
2178 }
2179 
2180 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2181 {
2182     ARMCPU *cpu = ARM_CPU(cs);
2183     CPUARMState *env = &cpu->env;
2184     uint32_t lr;
2185     bool ignore_stackfaults;
2186 
2187     arm_log_exception(cs);
2188 
2189     /*
2190      * For exceptions we just mark as pending on the NVIC, and let that
2191      * handle it.
2192      */
2193     switch (cs->exception_index) {
2194     case EXCP_UDEF:
2195         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2196         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2197         break;
2198     case EXCP_NOCP:
2199     {
2200         /*
2201          * NOCP might be directed to something other than the current
2202          * security state if this fault is because of NSACR; we indicate
2203          * the target security state using exception.target_el.
2204          */
2205         int target_secstate;
2206 
2207         if (env->exception.target_el == 3) {
2208             target_secstate = M_REG_S;
2209         } else {
2210             target_secstate = env->v7m.secure;
2211         }
2212         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2213         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2214         break;
2215     }
2216     case EXCP_INVSTATE:
2217         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2218         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2219         break;
2220     case EXCP_STKOF:
2221         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2222         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2223         break;
2224     case EXCP_LSERR:
2225         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2226         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2227         break;
2228     case EXCP_UNALIGNED:
2229         /* Unaligned faults reported by M-profile aware code */
2230         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2231         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2232         break;
2233     case EXCP_DIVBYZERO:
2234         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2235         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2236         break;
2237     case EXCP_SWI:
2238         /* The PC already points to the next instruction.  */
2239         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2240         break;
2241     case EXCP_PREFETCH_ABORT:
2242     case EXCP_DATA_ABORT:
2243         /*
2244          * Note that for M profile we don't have a guest facing FSR, but
2245          * the env->exception.fsr will be populated by the code that
2246          * raises the fault, in the A profile short-descriptor format.
2247          *
2248          * Log the exception.vaddress now regardless of subtype, because
2249          * logging below only logs it when it goes into a guest visible
2250          * register.
2251          */
2252         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2253                       (uint32_t)env->exception.vaddress);
2254         switch (env->exception.fsr & 0xf) {
2255         case M_FAKE_FSR_NSC_EXEC:
2256             /*
2257              * Exception generated when we try to execute code at an address
2258              * which is marked as Secure & Non-Secure Callable and the CPU
2259              * is in the Non-Secure state. The only instruction which can
2260              * be executed like this is SG (and that only if both halves of
2261              * the SG instruction have the same security attributes.)
2262              * Everything else must generate an INVEP SecureFault, so we
2263              * emulate the SG instruction here.
2264              */
2265             if (v7m_handle_execute_nsc(cpu)) {
2266                 return;
2267             }
2268             break;
2269         case M_FAKE_FSR_SFAULT:
2270             /*
2271              * Various flavours of SecureFault for attempts to execute or
2272              * access data in the wrong security state.
2273              */
2274             switch (cs->exception_index) {
2275             case EXCP_PREFETCH_ABORT:
2276                 if (env->v7m.secure) {
2277                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2278                     qemu_log_mask(CPU_LOG_INT,
2279                                   "...really SecureFault with SFSR.INVTRAN\n");
2280                 } else {
2281                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2282                     qemu_log_mask(CPU_LOG_INT,
2283                                   "...really SecureFault with SFSR.INVEP\n");
2284                 }
2285                 break;
2286             case EXCP_DATA_ABORT:
2287                 /* This must be an NS access to S memory */
2288                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2289                 qemu_log_mask(CPU_LOG_INT,
2290                               "...really SecureFault with SFSR.AUVIOL\n");
2291                 break;
2292             }
2293             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2294             break;
2295         case 0x8: /* External Abort */
2296             switch (cs->exception_index) {
2297             case EXCP_PREFETCH_ABORT:
2298                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2299                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2300                 break;
2301             case EXCP_DATA_ABORT:
2302                 env->v7m.cfsr[M_REG_NS] |=
2303                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2304                 env->v7m.bfar = env->exception.vaddress;
2305                 qemu_log_mask(CPU_LOG_INT,
2306                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2307                               env->v7m.bfar);
2308                 break;
2309             }
2310             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2311             break;
2312         case 0x1: /* Alignment fault reported by generic code */
2313             qemu_log_mask(CPU_LOG_INT,
2314                           "...really UsageFault with UFSR.UNALIGNED\n");
2315             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2316             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2317                                     env->v7m.secure);
2318             break;
2319         default:
2320             /*
2321              * All other FSR values are either MPU faults or "can't happen
2322              * for M profile" cases.
2323              */
2324             switch (cs->exception_index) {
2325             case EXCP_PREFETCH_ABORT:
2326                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2327                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2328                 break;
2329             case EXCP_DATA_ABORT:
2330                 env->v7m.cfsr[env->v7m.secure] |=
2331                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2332                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2333                 qemu_log_mask(CPU_LOG_INT,
2334                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2335                               env->v7m.mmfar[env->v7m.secure]);
2336                 break;
2337             }
2338             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2339                                     env->v7m.secure);
2340             break;
2341         }
2342         break;
2343     case EXCP_SEMIHOST:
2344         qemu_log_mask(CPU_LOG_INT,
2345                       "...handling as semihosting call 0x%x\n",
2346                       env->regs[0]);
2347 #ifdef CONFIG_TCG
2348         do_common_semihosting(cs);
2349 #else
2350         g_assert_not_reached();
2351 #endif
2352         env->regs[15] += env->thumb ? 2 : 4;
2353         return;
2354     case EXCP_BKPT:
2355         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2356         break;
2357     case EXCP_IRQ:
2358         break;
2359     case EXCP_EXCEPTION_EXIT:
2360         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2361             /* Must be v8M security extension function return */
2362             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2363             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2364             if (do_v7m_function_return(cpu)) {
2365                 return;
2366             }
2367         } else {
2368             do_v7m_exception_exit(cpu);
2369             return;
2370         }
2371         break;
2372     case EXCP_LAZYFP:
2373         /*
2374          * We already pended the specific exception in the NVIC in the
2375          * v7m_preserve_fp_state() helper function.
2376          */
2377         break;
2378     default:
2379         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2380         return; /* Never happens.  Keep compiler happy.  */
2381     }
2382 
2383     if (arm_feature(env, ARM_FEATURE_V8)) {
2384         lr = R_V7M_EXCRET_RES1_MASK |
2385             R_V7M_EXCRET_DCRS_MASK;
2386         /*
2387          * The S bit indicates whether we should return to Secure
2388          * or NonSecure (ie our current state).
2389          * The ES bit indicates whether we're taking this exception
2390          * to Secure or NonSecure (ie our target state). We set it
2391          * later, in v7m_exception_taken().
2392          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2393          * This corresponds to the ARM ARM pseudocode for v8M setting
2394          * some LR bits in PushStack() and some in ExceptionTaken();
2395          * the distinction matters for the tailchain cases where we
2396          * can take an exception without pushing the stack.
2397          */
2398         if (env->v7m.secure) {
2399             lr |= R_V7M_EXCRET_S_MASK;
2400         }
2401     } else {
2402         lr = R_V7M_EXCRET_RES1_MASK |
2403             R_V7M_EXCRET_S_MASK |
2404             R_V7M_EXCRET_DCRS_MASK |
2405             R_V7M_EXCRET_ES_MASK;
2406         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2407             lr |= R_V7M_EXCRET_SPSEL_MASK;
2408         }
2409     }
2410     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2411         lr |= R_V7M_EXCRET_FTYPE_MASK;
2412     }
2413     if (!arm_v7m_is_handler_mode(env)) {
2414         lr |= R_V7M_EXCRET_MODE_MASK;
2415     }
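         /*
          * (For example, on v7M an exception taken from Thread mode on the
          * process stack with no FP context active reaches this point with
          * lr == 0xfffffffd.)
          */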
2416 
2417     ignore_stackfaults = v7m_push_stack(cpu);
2418     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2419 }
2420 
2421 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2422 {
2423     unsigned el = arm_current_el(env);
2424 
2425     /* First handle registers which unprivileged code can read */
2426     switch (reg) {
2427     case 0 ... 7: /* xPSR sub-fields */
2428         return v7m_mrs_xpsr(env, reg, el);
2429     case 20: /* CONTROL */
2430         return arm_v7m_mrs_control(env, env->v7m.secure);
2431     case 0x94: /* CONTROL_NS */
2432         /*
2433          * We have to handle this here because unprivileged Secure code
2434          * can read the NS CONTROL register.
2435          */
2436         if (!env->v7m.secure) {
2437             return 0;
2438         }
2439         return env->v7m.control[M_REG_NS] |
2440             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2441     }
2442 
2443     if (el == 0) {
2444         return 0; /* unprivileged reads others as zero */
2445     }
2446 
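         /*
          * SYSm values with bit 7 set (0x88..0x98) are the banked _NS
          * register aliases; they are accessible only from Secure state,
          * which is why each case below reads as zero when
          * env->v7m.secure is false.
          */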
2447     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2448         switch (reg) {
2449         case 0x88: /* MSP_NS */
2450             if (!env->v7m.secure) {
2451                 return 0;
2452             }
2453             return env->v7m.other_ss_msp;
2454         case 0x89: /* PSP_NS */
2455             if (!env->v7m.secure) {
2456                 return 0;
2457             }
2458             return env->v7m.other_ss_psp;
2459         case 0x8a: /* MSPLIM_NS */
2460             if (!env->v7m.secure) {
2461                 return 0;
2462             }
2463             return env->v7m.msplim[M_REG_NS];
2464         case 0x8b: /* PSPLIM_NS */
2465             if (!env->v7m.secure) {
2466                 return 0;
2467             }
2468             return env->v7m.psplim[M_REG_NS];
2469         case 0x90: /* PRIMASK_NS */
2470             if (!env->v7m.secure) {
2471                 return 0;
2472             }
2473             return env->v7m.primask[M_REG_NS];
2474         case 0x91: /* BASEPRI_NS */
2475             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2476                 goto bad_reg;
2477             }
2478             if (!env->v7m.secure) {
2479                 return 0;
2480             }
2481             return env->v7m.basepri[M_REG_NS];
2482         case 0x93: /* FAULTMASK_NS */
2483             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2484                 goto bad_reg;
2485             }
2486             if (!env->v7m.secure) {
2487                 return 0;
2488             }
2489             return env->v7m.faultmask[M_REG_NS];
2490         case 0x98: /* SP_NS */
2491         {
2492             /*
2493              * This gives the non-secure SP selected based on whether we're
2494              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2495              */
2496             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2497 
2498             if (!env->v7m.secure) {
2499                 return 0;
2500             }
2501             if (!arm_v7m_is_handler_mode(env) && spsel) {
2502                 return env->v7m.other_ss_psp;
2503             } else {
2504                 return env->v7m.other_ss_msp;
2505             }
2506         }
2507         default:
2508             break;
2509         }
2510     }
2511 
2512     switch (reg) {
2513     case 8: /* MSP */
2514         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2515     case 9: /* PSP */
2516         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2517     case 10: /* MSPLIM */
2518         if (!arm_feature(env, ARM_FEATURE_V8)) {
2519             goto bad_reg;
2520         }
2521         return env->v7m.msplim[env->v7m.secure];
2522     case 11: /* PSPLIM */
2523         if (!arm_feature(env, ARM_FEATURE_V8)) {
2524             goto bad_reg;
2525         }
2526         return env->v7m.psplim[env->v7m.secure];
2527     case 16: /* PRIMASK */
2528         return env->v7m.primask[env->v7m.secure];
2529     case 17: /* BASEPRI */
2530     case 18: /* BASEPRI_MAX */
2531         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2532             goto bad_reg;
2533         }
2534         return env->v7m.basepri[env->v7m.secure];
2535     case 19: /* FAULTMASK */
2536         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2537             goto bad_reg;
2538         }
2539         return env->v7m.faultmask[env->v7m.secure];
2540     default:
2541     bad_reg:
2542         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2543                                        " register %d\n", reg);
2544         return 0;
2545     }
2546 }
2547 
2548 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2549 {
2550     /*
2551      * We're passed bits [11..0] of the instruction; extract
2552      * SYSm and the mask bits.
2553      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2554      * we choose to treat them as if the mask bits were valid.
2555      * NB that the pseudocode 'mask' variable is bits [11..10],
2556      * whereas ours is [11..8].
2557      */
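         /*
          * (So, for example, MSR APSR_nzcvq arrives with our mask == 8,
          * APSR_g with mask == 4, and APSR_nzcvqg with mask == 0xc.)
          */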
2558     uint32_t mask = extract32(maskreg, 8, 4);
2559     uint32_t reg = extract32(maskreg, 0, 8);
2560     int cur_el = arm_current_el(env);
2561 
2562     if (cur_el == 0 && reg > 7 && reg != 20) {
2563         /*
2564          * only xPSR sub-fields and CONTROL.SFPA may be written by
2565          * unprivileged code
2566          */
2567         return;
2568     }
2569 
2570     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2571         switch (reg) {
2572         case 0x88: /* MSP_NS */
2573             if (!env->v7m.secure) {
2574                 return;
2575             }
2576             env->v7m.other_ss_msp = val & ~3;
2577             return;
2578         case 0x89: /* PSP_NS */
2579             if (!env->v7m.secure) {
2580                 return;
2581             }
2582             env->v7m.other_ss_psp = val & ~3;
2583             return;
2584         case 0x8a: /* MSPLIM_NS */
2585             if (!env->v7m.secure) {
2586                 return;
2587             }
2588             env->v7m.msplim[M_REG_NS] = val & ~7;
2589             return;
2590         case 0x8b: /* PSPLIM_NS */
2591             if (!env->v7m.secure) {
2592                 return;
2593             }
2594             env->v7m.psplim[M_REG_NS] = val & ~7;
2595             return;
2596         case 0x90: /* PRIMASK_NS */
2597             if (!env->v7m.secure) {
2598                 return;
2599             }
2600             env->v7m.primask[M_REG_NS] = val & 1;
2601             return;
2602         case 0x91: /* BASEPRI_NS */
2603             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2604                 goto bad_reg;
2605             }
2606             if (!env->v7m.secure) {
2607                 return;
2608             }
2609             env->v7m.basepri[M_REG_NS] = val & 0xff;
2610             return;
2611         case 0x93: /* FAULTMASK_NS */
2612             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2613                 goto bad_reg;
2614             }
2615             if (!env->v7m.secure) {
2616                 return;
2617             }
2618             env->v7m.faultmask[M_REG_NS] = val & 1;
2619             return;
2620         case 0x94: /* CONTROL_NS */
2621             if (!env->v7m.secure) {
2622                 return;
2623             }
2624             write_v7m_control_spsel_for_secstate(env,
2625                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2626                                                  M_REG_NS);
2627             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2628                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2629                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2630             }
2631             /*
2632              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2633              * RES0 if the FPU is not present, and is stored in the S bank
2634              */
2635             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2636                 extract32(env->v7m.nsacr, 10, 1)) {
2637                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2638                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2639             }
2640             return;
2641         case 0x98: /* SP_NS */
2642         {
2643             /*
2644              * This gives the non-secure SP selected based on whether we're
2645              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2646              */
2647             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2648             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2649             uint32_t limit;
2650 
2651             if (!env->v7m.secure) {
2652                 return;
2653             }
2654 
2655             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2656 
2657             val &= ~0x3;
2658 
2659             if (val < limit) {
2660                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2661             }
2662 
2663             if (is_psp) {
2664                 env->v7m.other_ss_psp = val;
2665             } else {
2666                 env->v7m.other_ss_msp = val;
2667             }
2668             return;
2669         }
2670         default:
2671             break;
2672         }
2673     }
2674 
2675     switch (reg) {
2676     case 0 ... 7: /* xPSR sub-fields */
2677         v7m_msr_xpsr(env, mask, reg, val);
2678         break;
2679     case 8: /* MSP */
2680         if (v7m_using_psp(env)) {
2681             env->v7m.other_sp = val & ~3;
2682         } else {
2683             env->regs[13] = val & ~3;
2684         }
2685         break;
2686     case 9: /* PSP */
2687         if (v7m_using_psp(env)) {
2688             env->regs[13] = val & ~3;
2689         } else {
2690             env->v7m.other_sp = val & ~3;
2691         }
2692         break;
2693     case 10: /* MSPLIM */
2694         if (!arm_feature(env, ARM_FEATURE_V8)) {
2695             goto bad_reg;
2696         }
2697         env->v7m.msplim[env->v7m.secure] = val & ~7;
2698         break;
2699     case 11: /* PSPLIM */
2700         if (!arm_feature(env, ARM_FEATURE_V8)) {
2701             goto bad_reg;
2702         }
2703         env->v7m.psplim[env->v7m.secure] = val & ~7;
2704         break;
2705     case 16: /* PRIMASK */
2706         env->v7m.primask[env->v7m.secure] = val & 1;
2707         break;
2708     case 17: /* BASEPRI */
2709         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2710             goto bad_reg;
2711         }
2712         env->v7m.basepri[env->v7m.secure] = val & 0xff;
2713         break;
2714     case 18: /* BASEPRI_MAX */
2715         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2716             goto bad_reg;
2717         }
2718         val &= 0xff;
2719         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2720                          || env->v7m.basepri[env->v7m.secure] == 0)) {
2721             env->v7m.basepri[env->v7m.secure] = val;
2722         }
2723         break;
2724     case 19: /* FAULTMASK */
2725         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2726             goto bad_reg;
2727         }
2728         env->v7m.faultmask[env->v7m.secure] = val & 1;
2729         break;
2730     case 20: /* CONTROL */
2731         /*
2732          * Writing to the SPSEL bit only has an effect if we are in
2733          * thread mode; other bits can be updated by any privileged code.
2734          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2735      * env->v7m.control, so we only need to update the others.
2736          * For v7M, we must just ignore explicit writes to SPSEL in handler
2737          * mode; for v8M the write is permitted but will have no effect.
2738          * All these bits are writes-ignored from non-privileged code,
2739          * except for SFPA.
2740          */
2741         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2742                            !arm_v7m_is_handler_mode(env))) {
2743             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2744         }
2745         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2746             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2747             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2748         }
2749         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2750             /*
2751              * SFPA is RAZ/WI from NS or if no FPU.
2752              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2753              * Both are stored in the S bank.
2754              */
2755             if (env->v7m.secure) {
2756                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2757                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2758             }
2759             if (cur_el > 0 &&
2760                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2761                  extract32(env->v7m.nsacr, 10, 1))) {
2762                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2763                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2764             }
2765         }
2766         break;
2767     default:
2768     bad_reg:
2769         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2770                                        " register %d\n", reg);
2771         return;
2772     }
2773 }
2774 
2775 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2776 {
2777     /* Implement the TT instruction. op is bits [7:6] of the insn. */
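    /*
     * The encodings are TT (op == 0), TTT (op == 1, unprivileged),
     * TTA (op == 2, alternate security state) and TTAT (op == 3, both).
     */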
2778     bool forceunpriv = op & 1;
2779     bool alt = op & 2;
2780     V8M_SAttributes sattrs = {};
2781     uint32_t tt_resp;
2782     bool r, rw, nsr, nsrw, mrvalid;
2783     ARMMMUIdx mmu_idx;
2784     uint32_t mregion;
2785     bool targetpriv;
2786     bool targetsec = env->v7m.secure;
2787 
2788     /*
2789      * Work out which security state and privilege level we're
2790      * interested in...
2791      */
2792     if (alt) {
2793         targetsec = !targetsec;
2794     }
2795 
2796     if (forceunpriv) {
2797         targetpriv = false;
2798     } else {
2799         targetpriv = arm_v7m_is_handler_mode(env) ||
2800             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2801     }
2802 
2803     /* ...and then figure out which MMU index this is */
2804     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2805 
2806     /*
2807      * For our purposes the MPU and SAU don't care about the access
2808      * type, except that we must not claim to be an insn fetch, so
2809      * we arbitrarily call this a read.
2810      */
2811 
2812     /*
2813      * MPU region info only available for privileged or if
2814      * inspecting the other MPU state.
2815      */
2816     if (arm_current_el(env) != 0 || alt) {
2817         GetPhysAddrResult res = {};
2818         ARMMMUFaultInfo fi = {};
2819 
2820         /* We can ignore the return value as prot is always set */
2821         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
2822                           &res, &fi, &mregion);
2823         if (mregion == -1) {
2824             mrvalid = false;
2825             mregion = 0;
2826         } else {
2827             mrvalid = true;
2828         }
2829         r = res.f.prot & PAGE_READ;
2830         rw = res.f.prot & PAGE_WRITE;
2831     } else {
2832         r = false;
2833         rw = false;
2834         mrvalid = false;
2835         mregion = 0;
2836     }
2837 
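    /*
     * SAU attribution results are only reported to Secure code; from
     * Non-secure state the S, NSR and NSRW fields read as zero.
     */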
2838     if (env->v7m.secure) {
2839         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2840                             targetsec, &sattrs);
2841         nsr = sattrs.ns && r;
2842         nsrw = sattrs.ns && rw;
2843     } else {
2844         sattrs.ns = true;
2845         nsr = false;
2846         nsrw = false;
2847     }
2848 
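    /*
     * Assemble the TT response word:
     *  [31:24] IREGION  [23] IRVALID  [22] S        [21] NSRW
     *  [20] NSR         [19] RW       [18] R        [17] SRVALID
     *  [16] MRVALID     [15:8] SREGION  [7:0] MREGION
     */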
2849     tt_resp = (sattrs.iregion << 24) |
2850         (sattrs.irvalid << 23) |
2851         ((!sattrs.ns) << 22) |
2852         (nsrw << 21) |
2853         (nsr << 20) |
2854         (rw << 19) |
2855         (r << 18) |
2856         (sattrs.srvalid << 17) |
2857         (mrvalid << 16) |
2858         (sattrs.sregion << 8) |
2859         mregion;
2860 
2861     return tt_resp;
2862 }
2863 
2864 #endif /* !CONFIG_USER_ONLY */
2865 
2866 uint32_t *arm_v7m_get_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
2867                              bool spsel)
2868 {
2869     /*
2870      * Return a pointer to the location where we currently store the
2871      * stack pointer for the requested security state and thread mode.
2872      * This pointer will become invalid if the CPU state is updated
2873      * such that the stack pointers are switched around (e.g. changing
2874      * the SPSEL control bit).
2875      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
2876      * Unlike that pseudocode, we require the caller to pass us in the
2877      * SPSEL control bit value; this is because we also use this
2878      * function when pushing the callee-saves register part of the
2879      * v8M stack frame (pseudocode PushCalleeStack()),
2880      * and in the tailchain codepath the SPSEL bit comes from the exception
2881      * return magic LR value from the previous exception. The pseudocode
2882      * opencodes the stack-selection in PushCalleeStack(), but we prefer
2883      * open-codes the stack-selection in PushCalleeStack(), but we prefer
2884      */
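    /*
     * In short: the requested SP is the PSP iff (threadmode && spsel).
     * It lives in regs[13] if it is the SP currently in use, in
     * v7m.other_sp if it is the other SP of the current security
     * state, and in other_ss_psp/other_ss_msp if it belongs to the
     * other security state.
     */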
2885     bool want_psp = threadmode && spsel;
2886 
2887     if (secure == env->v7m.secure) {
2888         if (want_psp == v7m_using_psp(env)) {
2889             return &env->regs[13];
2890         } else {
2891             return &env->v7m.other_sp;
2892         }
2893     } else {
2894         if (want_psp) {
2895             return &env->v7m.other_ss_psp;
2896         } else {
2897             return &env->v7m.other_ss_msp;
2898         }
2899     }
2900 }
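
/*
 * Illustrative example (a sketch, not a verbatim call site): on the v8M
 * callee-save push path, the caller derives threadmode and spsel from
 * the exception-return LR value, roughly:
 *
 *     bool threadmode = !(lr & R_V7M_EXCRET_MODE_MASK);
 *     bool spsel = lr & R_V7M_EXCRET_SPSEL_MASK;
 *     uint32_t *frame_sp_p = arm_v7m_get_sp_ptr(env, M_REG_S,
 *                                               threadmode, spsel);
 */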
2901