xref: /openbmc/qemu/target/arm/tcg/m_helper.c (revision 5b5968c4)
1 /*
2  * ARM generic helpers.
3  *
4  * This code is licensed under the GNU GPL v2 or later.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "cpu.h"
11 #include "internals.h"
12 #include "exec/helper-proto.h"
13 #include "qemu/main-loop.h"
14 #include "qemu/bitops.h"
15 #include "qemu/log.h"
16 #include "exec/exec-all.h"
17 #ifdef CONFIG_TCG
18 #include "exec/cpu_ldst.h"
19 #include "semihosting/common-semi.h"
20 #endif
21 #if !defined(CONFIG_USER_ONLY)
22 #include "hw/intc/armv7m_nvic.h"
23 #endif
24 
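/*
 * MRS/MSR access to the xPSR sub-forms. In the SYSm encoding used for
 * @reg (values 0-7), bit 0 selects inclusion of the IPSR and bit 2
 * selects the forms without the APSR: e.g. reg 0 is APSR, reg 3 is xPSR
 * (all three sub-registers) and reg 5 is IPSR alone.
 */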
25 static void v7m_msr_xpsr(CPUARMState *env, uint32_t mask,
26                          uint32_t reg, uint32_t val)
27 {
28     /* Only APSR is actually writable */
29     if (!(reg & 4)) {
30         uint32_t apsrmask = 0;
31 
32         if (mask & 8) {
33             apsrmask |= XPSR_NZCV | XPSR_Q;
34         }
35         if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
36             apsrmask |= XPSR_GE;
37         }
38         xpsr_write(env, val, apsrmask);
39     }
40 }
41 
42 static uint32_t v7m_mrs_xpsr(CPUARMState *env, uint32_t reg, unsigned el)
43 {
44     uint32_t mask = 0;
45 
46     if ((reg & 1) && el) {
47         mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
48     }
49     if (!(reg & 4)) {
50         mask |= XPSR_NZCV | XPSR_Q; /* APSR */
51         if (arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
52             mask |= XPSR_GE;
53         }
54     }
55     /* EPSR reads as zero */
56     return xpsr_read(env) & mask;
57 }
58 
59 static uint32_t v7m_mrs_control(CPUARMState *env, uint32_t secure)
60 {
61     uint32_t value = env->v7m.control[secure];
62 
63     if (!secure) {
64         /* SFPA is RAZ/WI from NS; FPCA is stored in the M_REG_S bank */
65         value |= env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK;
66     }
67     return value;
68 }
69 
70 #ifdef CONFIG_USER_ONLY
71 
72 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
73 {
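    /*
     * @maskreg packs the translator's two arguments: bits [11:8] carry the
     * MSR mask field (bit 11 = write NZCVQ, bit 10 = write GE) and bits
     * [7:0] the SYSm register number; e.g. MSR APSR_nzcvq, Rn arrives as
     * maskreg 0x800 (mask 8, reg 0).
     */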
74     uint32_t mask = extract32(maskreg, 8, 4);
75     uint32_t reg = extract32(maskreg, 0, 8);
76 
77     switch (reg) {
78     case 0 ... 7: /* xPSR sub-fields */
79         v7m_msr_xpsr(env, mask, reg, val);
80         break;
81     case 20: /* CONTROL */
82         /* There are no sub-fields that are actually writable from EL0. */
83         break;
84     default:
85         /* Unprivileged writes to other registers are ignored */
86         break;
87     }
88 }
89 
90 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
91 {
92     switch (reg) {
93     case 0 ... 7: /* xPSR sub-fields */
94         return v7m_mrs_xpsr(env, reg, 0);
95     case 20: /* CONTROL */
96         return v7m_mrs_control(env, 0);
97     default:
98         /* Unprivileged reads of other registers return zero. */
99         return 0;
100     }
101 }
102 
103 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
104 {
105     /* translate.c should never generate calls here in user-only mode */
106     g_assert_not_reached();
107 }
108 
109 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
110 {
111     /* translate.c should never generate calls here in user-only mode */
112     g_assert_not_reached();
113 }
114 
115 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
116 {
117     /* translate.c should never generate calls here in user-only mode */
118     g_assert_not_reached();
119 }
120 
121 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
122 {
123     /* translate.c should never generate calls here in user-only mode */
124     g_assert_not_reached();
125 }
126 
127 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
128 {
129     /* translate.c should never generate calls here in user-only mode */
130     g_assert_not_reached();
131 }
132 
133 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
134 {
135     /*
136      * The TT instructions can be used by unprivileged code, but in
137      * user-only emulation we don't have the MPU.
138      * Luckily, since we know we are NonSecure unprivileged (and that in
139      * turn means that the A flag wasn't specified), all the bits in the
140      * register must be zero:
141      *  IREGION: 0 because IRVALID is 0
142      *  IRVALID: 0 because NS
143      *  S: 0 because NS
144      *  NSRW: 0 because NS
145      *  NSR: 0 because NS
146      *  RW: 0 because unpriv and A flag not set
147      *  R: 0 because unpriv and A flag not set
148      *  SRVALID: 0 because NS
149      *  MRVALID: 0 because unpriv and A flag not set
150      *  SREGION: 0 because SRVALID is 0
151      *  MREGION: 0 because MRVALID is 0
152      */
153     return 0;
154 }
155 
156 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
157 {
158     return ARMMMUIdx_MUser;
159 }
160 
161 #else /* !CONFIG_USER_ONLY */
162 
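/*
 * Compose an M-profile MMU index from its component flags: e.g. a
 * privileged Secure access at normal priority yields ARM_MMU_IDX_M |
 * ARM_MMU_IDX_M_PRIV | ARM_MMU_IDX_M_S, i.e. ARMMMUIdx_MSPriv.
 */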
163 static ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
164                                      bool secstate, bool priv, bool negpri)
165 {
166     ARMMMUIdx mmu_idx = ARM_MMU_IDX_M;
167 
168     if (priv) {
169         mmu_idx |= ARM_MMU_IDX_M_PRIV;
170     }
171 
172     if (negpri) {
173         mmu_idx |= ARM_MMU_IDX_M_NEGPRI;
174     }
175 
176     if (secstate) {
177         mmu_idx |= ARM_MMU_IDX_M_S;
178     }
179 
180     return mmu_idx;
181 }
182 
183 static ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
184                                                        bool secstate, bool priv)
185 {
186     bool negpri = armv7m_nvic_neg_prio_requested(env->nvic, secstate);
187 
188     return arm_v7m_mmu_idx_all(env, secstate, priv, negpri);
189 }
190 
191 /* Return the MMU index for a v7M CPU in the specified security state */
192 ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
193 {
194     bool priv = arm_v7m_is_handler_mode(env) ||
195         !(env->v7m.control[secstate] & R_V7M_CONTROL_NPRIV_MASK);
196 
197     return arm_v7m_mmu_idx_for_secstate_and_priv(env, secstate, priv);
198 }
199 
200 /*
201  * What kind of stack write are we doing? This affects how exceptions
202  * generated during the stacking are treated.
203  */
204 typedef enum StackingMode {
205     STACK_NORMAL,
206     STACK_IGNFAULTS,
207     STACK_LAZYFP,
208 } StackingMode;
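/*
 * On a failed write, STACK_NORMAL pends the fault as a derived exception,
 * STACK_LAZYFP pends it using the lazy-FP rules, and STACK_IGNFAULTS only
 * updates the fault status registers without pending anything (see the
 * pend_fault path in v7m_stack_write() below).
 */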
209 
210 static bool v7m_stack_write(ARMCPU *cpu, uint32_t addr, uint32_t value,
211                             ARMMMUIdx mmu_idx, StackingMode mode)
212 {
213     CPUState *cs = CPU(cpu);
214     CPUARMState *env = &cpu->env;
215     MemTxResult txres;
216     GetPhysAddrResult res = {};
217     ARMMMUFaultInfo fi = {};
218     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
219     int exc;
220     bool exc_secure;
221 
222     if (get_phys_addr(env, addr, MMU_DATA_STORE, mmu_idx, &res, &fi)) {
223         /* MPU/SAU lookup failed */
224         if (fi.type == ARMFault_QEMU_SFault) {
225             if (mode == STACK_LAZYFP) {
226                 qemu_log_mask(CPU_LOG_INT,
227                               "...SecureFault with SFSR.LSPERR "
228                               "during lazy stacking\n");
229                 env->v7m.sfsr |= R_V7M_SFSR_LSPERR_MASK;
230             } else {
231                 qemu_log_mask(CPU_LOG_INT,
232                               "...SecureFault with SFSR.AUVIOL "
233                               "during stacking\n");
234                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
235             }
236             env->v7m.sfsr |= R_V7M_SFSR_SFARVALID_MASK;
237             env->v7m.sfar = addr;
238             exc = ARMV7M_EXCP_SECURE;
239             exc_secure = false;
240         } else {
241             if (mode == STACK_LAZYFP) {
242                 qemu_log_mask(CPU_LOG_INT,
243                               "...MemManageFault with CFSR.MLSPERR\n");
244                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MLSPERR_MASK;
245             } else {
246                 qemu_log_mask(CPU_LOG_INT,
247                               "...MemManageFault with CFSR.MSTKERR\n");
248                 env->v7m.cfsr[secure] |= R_V7M_CFSR_MSTKERR_MASK;
249             }
250             exc = ARMV7M_EXCP_MEM;
251             exc_secure = secure;
252         }
253         goto pend_fault;
254     }
255     address_space_stl_le(arm_addressspace(cs, res.f.attrs), res.f.phys_addr,
256                          value, res.f.attrs, &txres);
257     if (txres != MEMTX_OK) {
258         /* BusFault trying to write the data */
259         if (mode == STACK_LAZYFP) {
260             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.LSPERR\n");
261             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_LSPERR_MASK;
262         } else {
263             qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.STKERR\n");
264             env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_STKERR_MASK;
265         }
266         exc = ARMV7M_EXCP_BUS;
267         exc_secure = false;
268         goto pend_fault;
269     }
270     return true;
271 
272 pend_fault:
273     /*
274      * By pending the exception at this point we are making
275      * the IMPDEF choice "overridden exceptions pended" (see the
276      * MergeExcInfo() pseudocode). The other choice would be to not
277      * pend them now and then make a choice about which to throw away
278      * later if we have two derived exceptions.
279      * The only case when we must not pend the exception but instead
280      * throw it away is if we are doing the push of the callee registers
281      * and we've already generated a derived exception (this is indicated
282      * by the caller passing STACK_IGNFAULTS). Even in this case we will
283      * still update the fault status registers.
284      */
285     switch (mode) {
286     case STACK_NORMAL:
287         armv7m_nvic_set_pending_derived(env->nvic, exc, exc_secure);
288         break;
289     case STACK_LAZYFP:
290         armv7m_nvic_set_pending_lazyfp(env->nvic, exc, exc_secure);
291         break;
292     case STACK_IGNFAULTS:
293         break;
294     }
295     return false;
296 }
297 
298 static bool v7m_stack_read(ARMCPU *cpu, uint32_t *dest, uint32_t addr,
299                            ARMMMUIdx mmu_idx)
300 {
301     CPUState *cs = CPU(cpu);
302     CPUARMState *env = &cpu->env;
303     MemTxResult txres;
304     GetPhysAddrResult res = {};
305     ARMMMUFaultInfo fi = {};
306     bool secure = mmu_idx & ARM_MMU_IDX_M_S;
307     int exc;
308     bool exc_secure;
309     uint32_t value;
310 
311     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
312         /* MPU/SAU lookup failed */
313         if (fi.type == ARMFault_QEMU_SFault) {
314             qemu_log_mask(CPU_LOG_INT,
315                           "...SecureFault with SFSR.AUVIOL during unstack\n");
316             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
317             env->v7m.sfar = addr;
318             exc = ARMV7M_EXCP_SECURE;
319             exc_secure = false;
320         } else {
321             qemu_log_mask(CPU_LOG_INT,
322                           "...MemManageFault with CFSR.MUNSTKERR\n");
323             env->v7m.cfsr[secure] |= R_V7M_CFSR_MUNSTKERR_MASK;
324             exc = ARMV7M_EXCP_MEM;
325             exc_secure = secure;
326         }
327         goto pend_fault;
328     }
329 
330     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
331                               res.f.phys_addr, res.f.attrs, &txres);
332     if (txres != MEMTX_OK) {
333         /* BusFault trying to read the data */
334         qemu_log_mask(CPU_LOG_INT, "...BusFault with BFSR.UNSTKERR\n");
335         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_UNSTKERR_MASK;
336         exc = ARMV7M_EXCP_BUS;
337         exc_secure = false;
338         goto pend_fault;
339     }
340 
341     *dest = value;
342     return true;
343 
344 pend_fault:
345     /*
346      * By pending the exception at this point we are making
347      * the IMPDEF choice "overridden exceptions pended" (see the
348      * MergeExcInfo() pseudocode). The other choice would be to not
349      * pend them now and then make a choice about which to throw away
350      * later if we have two derived exceptions.
351      */
352     armv7m_nvic_set_pending(env->nvic, exc, exc_secure);
353     return false;
354 }
355 
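/*
 * Layout of the FP register part of the frame, as offsets from the frame
 * pointer (fpcar here; fptr in v7m_vlstm()/v7m_vlldm() below):
 *   0x00..0x3c : s0..s15
 *   0x40       : FPSCR
 *   0x44       : VPR (only if MVE is implemented)
 *   0x48..0x84 : s16..s31 (only if FPCCR.TS is set)
 */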
356 void HELPER(v7m_preserve_fp_state)(CPUARMState *env)
357 {
358     /*
359      * Preserve FP state (because LSPACT was set and we are about
360      * to execute an FP instruction). This corresponds to the
361      * PreserveFPState() pseudocode.
362      * We may throw an exception if the stacking fails.
363      */
364     ARMCPU *cpu = env_archcpu(env);
365     bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
366     bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK);
367     bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK);
368     bool splimviol = env->v7m.fpccr[is_secure] & R_V7M_FPCCR_SPLIMVIOL_MASK;
369     uint32_t fpcar = env->v7m.fpcar[is_secure];
370     bool stacked_ok = true;
371     bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
372     bool take_exception;
373 
374     /* Take the iothread lock as we are going to touch the NVIC */
375     qemu_mutex_lock_iothread();
376 
377     /* Check the background context had access to the FPU */
378     if (!v7m_cpacr_pass(env, is_secure, is_priv)) {
379         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, is_secure);
380         env->v7m.cfsr[is_secure] |= R_V7M_CFSR_NOCP_MASK;
381         stacked_ok = false;
382     } else if (!is_secure && !extract32(env->v7m.nsacr, 10, 1)) {
383         armv7m_nvic_set_pending_lazyfp(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
384         env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
385         stacked_ok = false;
386     }
387 
388     if (!splimviol && stacked_ok) {
389         /* We only stack if the stack limit wasn't violated */
390         int i;
391         ARMMMUIdx mmu_idx;
392 
393         mmu_idx = arm_v7m_mmu_idx_all(env, is_secure, is_priv, negpri);
394         for (i = 0; i < (ts ? 32 : 16); i += 2) {
395             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
396             uint32_t faddr = fpcar + 4 * i;
397             uint32_t slo = extract64(dn, 0, 32);
398             uint32_t shi = extract64(dn, 32, 32);
399 
400             if (i >= 16) {
401                 faddr += 8; /* skip the slot for the FPSCR/VPR */
402             }
403             stacked_ok = stacked_ok &&
404                 v7m_stack_write(cpu, faddr, slo, mmu_idx, STACK_LAZYFP) &&
405                 v7m_stack_write(cpu, faddr + 4, shi, mmu_idx, STACK_LAZYFP);
406         }
407 
408         stacked_ok = stacked_ok &&
409             v7m_stack_write(cpu, fpcar + 0x40,
410                             vfp_get_fpscr(env), mmu_idx, STACK_LAZYFP);
411         if (cpu_isar_feature(aa32_mve, cpu)) {
412             stacked_ok = stacked_ok &&
413                 v7m_stack_write(cpu, fpcar + 0x44,
414                                 env->v7m.vpr, mmu_idx, STACK_LAZYFP);
415         }
416     }
417 
418     /*
419      * We definitely pended an exception, but it's possible that it
420      * might not be able to be taken now. If its priority permits us
421      * to take it now, then we must not update the LSPACT or FP regs,
422      * but instead jump out to take the exception immediately.
423      * If it's just pending and won't be taken until the current
424      * handler exits, then we do update LSPACT and the FP regs.
425      */
426     take_exception = !stacked_ok &&
427         armv7m_nvic_can_take_pending_exception(env->nvic);
428 
429     qemu_mutex_unlock_iothread();
430 
431     if (take_exception) {
432         raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC());
433     }
434 
435     env->v7m.fpccr[is_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
436 
437     if (ts) {
438         /* Clear s0 to s31 and the FPSCR and VPR */
439         int i;
440 
441         for (i = 0; i < 32; i += 2) {
442             *aa32_vfp_dreg(env, i / 2) = 0;
443         }
444         vfp_set_fpscr(env, 0);
445         if (cpu_isar_feature(aa32_mve, cpu)) {
446             env->v7m.vpr = 0;
447         }
448     }
449     /*
450      * Otherwise s0 to s15, FPSCR and VPR are UNKNOWN; we choose to leave them
451      * unchanged.
452      */
453 }
454 
455 /*
456  * Write to v7M CONTROL.SPSEL bit for the specified security bank.
457  * This may change the current stack pointer between Main and Process
458  * stack pointers if it is done for the CONTROL register for the current
459  * security state.
460  */
461 static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
462                                                  bool new_spsel,
463                                                  bool secstate)
464 {
465     bool old_is_psp = v7m_using_psp(env);
466 
467     env->v7m.control[secstate] =
468         deposit32(env->v7m.control[secstate],
469                   R_V7M_CONTROL_SPSEL_SHIFT,
470                   R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
471 
472     if (secstate == env->v7m.secure) {
473         bool new_is_psp = v7m_using_psp(env);
474         uint32_t tmp;
475 
476         if (old_is_psp != new_is_psp) {
477             tmp = env->v7m.other_sp;
478             env->v7m.other_sp = env->regs[13];
479             env->regs[13] = tmp;
480         }
481     }
482 }
483 
484 /*
485  * Write to v7M CONTROL.SPSEL bit. This may change the current
486  * stack pointer between Main and Process stack pointers.
487  */
488 static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
489 {
490     write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
491 }
492 
493 void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
494 {
495     /*
496      * Write a new value to v7m.exception, thus transitioning into or out
497      * of Handler mode; this may result in a change of active stack pointer.
498      */
499     bool new_is_psp, old_is_psp = v7m_using_psp(env);
500     uint32_t tmp;
501 
502     env->v7m.exception = new_exc;
503 
504     new_is_psp = v7m_using_psp(env);
505 
506     if (old_is_psp != new_is_psp) {
507         tmp = env->v7m.other_sp;
508         env->v7m.other_sp = env->regs[13];
509         env->regs[13] = tmp;
510     }
511 }
512 
513 /* Switch M profile security state between NS and S */
514 static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
515 {
516     uint32_t new_ss_msp, new_ss_psp;
517 
518     if (env->v7m.secure == new_secstate) {
519         return;
520     }
521 
522     /*
523      * All the banked state is accessed by looking at env->v7m.secure
524      * except for the stack pointer; rearrange the SP appropriately.
525      */
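    /*
     * Invariant: regs[13] holds the currently selected SP, v7m.other_sp
     * the other SP of the current security state, and other_ss_msp/
     * other_ss_psp the two SPs of the inactive security state; the
     * shuffling below preserves this across the switch.
     */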
526     new_ss_msp = env->v7m.other_ss_msp;
527     new_ss_psp = env->v7m.other_ss_psp;
528 
529     if (v7m_using_psp(env)) {
530         env->v7m.other_ss_psp = env->regs[13];
531         env->v7m.other_ss_msp = env->v7m.other_sp;
532     } else {
533         env->v7m.other_ss_msp = env->regs[13];
534         env->v7m.other_ss_psp = env->v7m.other_sp;
535     }
536 
537     env->v7m.secure = new_secstate;
538 
539     if (v7m_using_psp(env)) {
540         env->regs[13] = new_ss_psp;
541         env->v7m.other_sp = new_ss_msp;
542     } else {
543         env->regs[13] = new_ss_msp;
544         env->v7m.other_sp = new_ss_psp;
545     }
546 }
547 
548 void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
549 {
550     /*
551      * Handle v7M BXNS:
552      *  - if the return value is a magic value, do exception return (like BX)
553      *  - otherwise bit 0 of the return value is the target security state
554      */
555     uint32_t min_magic;
556 
557     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
558         /* Covers FNC_RETURN and EXC_RETURN magic */
559         min_magic = FNC_RETURN_MIN_MAGIC;
560     } else {
561         /* EXC_RETURN magic only */
562         min_magic = EXC_RETURN_MIN_MAGIC;
563     }
564 
565     if (dest >= min_magic) {
566         /*
567          * This is an exception return magic value; put it where
568          * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
569          * Note that if we ever add gen_ss_advance() singlestep support to
570          * M profile this should count as an "instruction execution complete"
571          * event (compare gen_bx_excret_final_code()).
572          */
573         env->regs[15] = dest & ~1;
574         env->thumb = dest & 1;
575         HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
576         /* notreached */
577     }
578 
579     /* translate.c should have made BXNS UNDEF unless we're secure */
580     assert(env->v7m.secure);
581 
582     if (!(dest & 1)) {
583         env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
584     }
585     switch_v7m_security_state(env, dest & 1);
586     env->thumb = true;
587     env->regs[15] = dest & ~1;
588     arm_rebuild_hflags(env);
589 }
590 
591 void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
592 {
593     /*
594      * Handle v7M BLXNS:
595      *  - bit 0 of the destination address is the target security state
596      */
597 
598     /* At this point regs[15] is the address just after the BLXNS */
599     uint32_t nextinst = env->regs[15] | 1;
600     uint32_t sp = env->regs[13] - 8;
601     uint32_t saved_psr;
602 
603     /* translate.c will have made BLXNS UNDEF unless we're secure */
604     assert(env->v7m.secure);
605 
606     if (dest & 1) {
607         /*
608          * Target is Secure, so this is just a normal BLX,
609          * except that the low bit doesn't indicate Thumb/not.
610          */
611         env->regs[14] = nextinst;
612         env->thumb = true;
613         env->regs[15] = dest & ~1;
614         return;
615     }
616 
617     /* Target is non-secure: first push a stack frame */
618     if (!QEMU_IS_ALIGNED(sp, 8)) {
619         qemu_log_mask(LOG_GUEST_ERROR,
620                       "BLXNS with misaligned SP is UNPREDICTABLE\n");
621     }
622 
623     if (sp < v7m_sp_limit(env)) {
624         raise_exception(env, EXCP_STKOF, 0, 1);
625     }
626 
627     saved_psr = env->v7m.exception;
628     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
629         saved_psr |= XPSR_SFPA;
630     }
631 
632     /* Note that these stores can throw exceptions on MPU faults */
633     cpu_stl_data_ra(env, sp, nextinst, GETPC());
634     cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
635 
636     env->regs[13] = sp;
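    /*
     * 0xfeffffff is the FNC_RETURN magic value with bit 0 set; a later
     * branch to it is treated as a magic-value jump (compare the
     * min_magic handling in HELPER(v7m_bxns) above), not a normal branch.
     */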
637     env->regs[14] = 0xfeffffff;
638     if (arm_v7m_is_handler_mode(env)) {
639         /*
640          * Write a dummy value to IPSR, to avoid leaking the current secure
641          * exception number to non-secure code. This is guaranteed not
642          * to cause write_v7m_exception() to actually change stacks.
643          */
644         write_v7m_exception(env, 1);
645     }
646     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
647     switch_v7m_security_state(env, 0);
648     env->thumb = true;
649     env->regs[15] = dest;
650     arm_rebuild_hflags(env);
651 }
652 
653 static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
654                                 bool spsel)
655 {
656     /*
657      * Return a pointer to the location where we currently store the
658      * stack pointer for the requested security state and thread mode.
659      * This pointer will become invalid if the CPU state is updated
660      * such that the stack pointers are switched around (e.g. changing
661      * the SPSEL control bit).
662      * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
663      * Unlike that pseudocode, we require the caller to pass us in the
664      * SPSEL control bit value; this is because we also use this
665      * function in handling of pushing of the callee-saves registers
666      * part of the v8M stack frame (pseudocode PushCalleeStack()),
667      * and in the tailchain codepath the SPSEL bit comes from the exception
668      * return magic LR value from the previous exception. The pseudocode
669      * opencodes the stack-selection in PushCalleeStack(), but we prefer
670      * to make this utility function generic enough to do the job.
671      */
672     bool want_psp = threadmode && spsel;
673 
674     if (secure == env->v7m.secure) {
675         if (want_psp == v7m_using_psp(env)) {
676             return &env->regs[13];
677         } else {
678             return &env->v7m.other_sp;
679         }
680     } else {
681         if (want_psp) {
682             return &env->v7m.other_ss_psp;
683         } else {
684             return &env->v7m.other_ss_msp;
685         }
686     }
687 }
688 
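/*
 * Fetch the vector table entry for exception @exc: a 32-bit load from
 * vecbase + 4 * exc (e.g. HardFault, exception number 3, reads offset
 * 0xc). Returns false, having pended a derived HardFault, on failure.
 */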
689 static bool arm_v7m_load_vector(ARMCPU *cpu, int exc, bool targets_secure,
690                                 uint32_t *pvec)
691 {
692     CPUState *cs = CPU(cpu);
693     CPUARMState *env = &cpu->env;
694     MemTxResult result;
695     uint32_t addr = env->v7m.vecbase[targets_secure] + exc * 4;
696     uint32_t vector_entry;
697     MemTxAttrs attrs = {};
698     ARMMMUIdx mmu_idx;
699     bool exc_secure;
700 
701     qemu_log_mask(CPU_LOG_INT,
702                   "...loading from element %d of %s vector table at 0x%x\n",
703                   exc, targets_secure ? "secure" : "non-secure", addr);
704 
705     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targets_secure, true);
706 
707     /*
708      * We don't do a get_phys_addr() here because the rules for vector
709      * loads are special: they always use the default memory map, and
710      * the default memory map permits reads from all addresses.
711      * Since there's no easy way to pass through to pmsav8_mpu_lookup()
712      * that we want this special case which would always say "yes",
713      * we just do the SAU lookup here followed by a direct physical load.
714      */
715     attrs.secure = targets_secure;
716     attrs.user = false;
717 
718     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
719         V8M_SAttributes sattrs = {};
720 
721         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
722                             targets_secure, &sattrs);
723         if (sattrs.ns) {
724             attrs.secure = false;
725         } else if (!targets_secure) {
726             /*
727              * NS access to S memory: the underlying exception which we escalate
728              * to HardFault is SecureFault, which always targets Secure.
729              */
730             exc_secure = true;
731             goto load_fail;
732         }
733     }
734 
735     vector_entry = address_space_ldl(arm_addressspace(cs, attrs), addr,
736                                      attrs, &result);
737     if (result != MEMTX_OK) {
738         /*
739          * Underlying exception is BusFault: its target security state
740          * depends on BFHFNMINS.
741          */
742         exc_secure = !(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
743         goto load_fail;
744     }
745     *pvec = vector_entry;
746     qemu_log_mask(CPU_LOG_INT, "...loaded new PC 0x%x\n", *pvec);
747     return true;
748 
749 load_fail:
750     /*
751      * All vector table fetch fails are reported as HardFault, with
752      * HFSR.VECTTBL and .FORCED set. (FORCED is set because
753      * technically the underlying exception is a SecureFault or BusFault
754      * that is escalated to HardFault.) This is a terminal exception,
755      * so we will either take the HardFault immediately or else enter
756      * lockup (the latter case is handled in armv7m_nvic_set_pending_derived()).
757      * The HardFault is Secure if BFHFNMINS is 0 (meaning that all HFs are
758      * secure); otherwise it targets the same security state as the
759      * underlying exception.
760      * In v8.1M HardFaults from vector table fetch fails don't set FORCED.
761      */
762     if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
763         exc_secure = true;
764     }
765     env->v7m.hfsr |= R_V7M_HFSR_VECTTBL_MASK;
766     if (!arm_feature(env, ARM_FEATURE_V8_1M)) {
767         env->v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
768     }
769     armv7m_nvic_set_pending_derived(env->nvic, ARMV7M_EXCP_HARD, exc_secure);
770     return false;
771 }
772 
773 static uint32_t v7m_integrity_sig(CPUARMState *env, uint32_t lr)
774 {
775     /*
776      * Return the integrity signature value for the callee-saves
777      * stack frame section. @lr is the exception return payload/LR value
778      * whose FType bit forms bit 0 of the signature if FP is present.
779      */
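    /*
     * The architected signature values are FEFA125A (FP frame present)
     * and FEFA125B (no FP frame): with an FPU, bit 0 mirrors EXCRET.FType.
     */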
780     uint32_t sig = 0xfefa125a;
781 
782     if (!cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))
783         || (lr & R_V7M_EXCRET_FTYPE_MASK)) {
784         sig |= 1;
785     }
786     return sig;
787 }
788 
789 static bool v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain,
790                                   bool ignore_faults)
791 {
792     /*
793      * For v8M, push the callee-saves register part of the stack frame.
794      * Compare the v8M pseudocode PushCalleeStack().
795      * In the tailchaining case this may not be the current stack.
796      */
797     CPUARMState *env = &cpu->env;
798     uint32_t *frame_sp_p;
799     uint32_t frameptr;
800     ARMMMUIdx mmu_idx;
801     bool stacked_ok;
802     uint32_t limit;
803     bool want_psp;
804     uint32_t sig;
805     StackingMode smode = ignore_faults ? STACK_IGNFAULTS : STACK_NORMAL;
806 
807     if (dotailchain) {
808         bool mode = lr & R_V7M_EXCRET_MODE_MASK;
809         bool priv = !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_NPRIV_MASK) ||
810             !mode;
811 
812         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, M_REG_S, priv);
813         frame_sp_p = get_v7m_sp_ptr(env, M_REG_S, mode,
814                                     lr & R_V7M_EXCRET_SPSEL_MASK);
815         want_psp = mode && (lr & R_V7M_EXCRET_SPSEL_MASK);
816         if (want_psp) {
817             limit = env->v7m.psplim[M_REG_S];
818         } else {
819             limit = env->v7m.msplim[M_REG_S];
820         }
821     } else {
822         mmu_idx = arm_mmu_idx(env);
823         frame_sp_p = &env->regs[13];
824         limit = v7m_sp_limit(env);
825     }
826 
827     frameptr = *frame_sp_p - 0x28;
828     if (frameptr < limit) {
829         /*
830          * Stack limit failure: set SP to the limit value, and generate
831          * STKOF UsageFault. Stack pushes below the limit must not be
832          * performed. It is IMPDEF whether pushes above the limit are
833          * performed; we choose not to.
834          */
835         qemu_log_mask(CPU_LOG_INT,
836                       "...STKOF during callee-saves register stacking\n");
837         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
838         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
839                                 env->v7m.secure);
840         *frame_sp_p = limit;
841         return true;
842     }
843 
844     /*
845      * Write as much of the stack frame as we can. A write failure may
846      * cause us to pend a derived exception.
847      */
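    /*
     * Callee-saves frame layout (0x28 bytes, matching the decrement of
     * *frame_sp_p above): [0x0] integrity signature, [0x4] reserved,
     * [0x8..0x24] r4-r11.
     */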
848     sig = v7m_integrity_sig(env, lr);
849     stacked_ok =
850         v7m_stack_write(cpu, frameptr, sig, mmu_idx, smode) &&
851         v7m_stack_write(cpu, frameptr + 0x8, env->regs[4], mmu_idx, smode) &&
852         v7m_stack_write(cpu, frameptr + 0xc, env->regs[5], mmu_idx, smode) &&
853         v7m_stack_write(cpu, frameptr + 0x10, env->regs[6], mmu_idx, smode) &&
854         v7m_stack_write(cpu, frameptr + 0x14, env->regs[7], mmu_idx, smode) &&
855         v7m_stack_write(cpu, frameptr + 0x18, env->regs[8], mmu_idx, smode) &&
856         v7m_stack_write(cpu, frameptr + 0x1c, env->regs[9], mmu_idx, smode) &&
857         v7m_stack_write(cpu, frameptr + 0x20, env->regs[10], mmu_idx, smode) &&
858         v7m_stack_write(cpu, frameptr + 0x24, env->regs[11], mmu_idx, smode);
859 
860     /* Update SP regardless of whether any of the stack accesses failed. */
861     *frame_sp_p = frameptr;
862 
863     return !stacked_ok;
864 }
865 
866 static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
867                                 bool ignore_stackfaults)
868 {
869     /*
870      * Do the "take the exception" parts of exception entry,
871      * but not the pushing of state to the stack. This is
872      * similar to the pseudocode ExceptionTaken() function.
873      */
874     CPUARMState *env = &cpu->env;
875     uint32_t addr;
876     bool targets_secure;
877     int exc;
878     bool push_failed = false;
879 
880     armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
881     qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
882                   targets_secure ? "secure" : "nonsecure", exc);
883 
884     if (dotailchain) {
885         /* Sanitize LR FType and PREFIX bits */
886         if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
887             lr |= R_V7M_EXCRET_FTYPE_MASK;
888         }
889         lr = deposit32(lr, 24, 8, 0xff);
890     }
891 
892     if (arm_feature(env, ARM_FEATURE_V8)) {
893         if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
894             (lr & R_V7M_EXCRET_S_MASK)) {
895             /*
896              * exception frame) is Secure. This means it may either already
897              * have pushed, or now need to push, callee-saves registers.
898              * have or now needs to push callee-saves registers.
899              */
900             if (targets_secure) {
901                 if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
902                     /*
903                      * We took an exception from Secure to NonSecure
904                      * (which means the callee-saved registers got stacked)
905                      * and are now tailchaining to a Secure exception.
906                      * Clear DCRS so eventual return from this Secure
907                      * exception unstacks the callee-saved registers.
908                      */
909                     lr &= ~R_V7M_EXCRET_DCRS_MASK;
910                 }
911             } else {
912                 /*
913                  * We're going to a non-secure exception; push the
914                  * callee-saves registers to the stack now, if they're
915                  * not already saved.
916                  */
917                 if (lr & R_V7M_EXCRET_DCRS_MASK &&
918                     !(dotailchain && !(lr & R_V7M_EXCRET_ES_MASK))) {
919                     push_failed = v7m_push_callee_stack(cpu, lr, dotailchain,
920                                                         ignore_stackfaults);
921                 }
922                 lr |= R_V7M_EXCRET_DCRS_MASK;
923             }
924         }
925 
926         lr &= ~R_V7M_EXCRET_ES_MASK;
927         if (targets_secure) {
928             lr |= R_V7M_EXCRET_ES_MASK;
929         }
930         lr &= ~R_V7M_EXCRET_SPSEL_MASK;
931         if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
932             lr |= R_V7M_EXCRET_SPSEL_MASK;
933         }
934 
935         /*
936          * Clear registers if necessary to prevent non-secure exception
937          * code being able to see register values from secure code.
938          * Where register values become architecturally UNKNOWN we leave
939          * them with their previous values. v8.1M is tighter than v8.0M
940          * here and always zeroes the caller-saved registers regardless
941          * of the security state the exception is targeting.
942          */
943         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
944             if (!targets_secure || arm_feature(env, ARM_FEATURE_V8_1M)) {
945                 /*
946                  * Always clear the caller-saved registers (they have been
947                  * pushed to the stack earlier in v7m_push_stack()).
948                  * Clear callee-saved registers if the background code is
949                  * Secure (in which case these regs were saved in
950                  * v7m_push_callee_stack()).
951                  */
952                 int i;
953                 /*
954                  * r4..r11 are callee-saves, zero only if background
955                  * state was Secure (EXCRET.S == 1) and exception
956                  * targets Non-secure state
957                  */
958                 bool zero_callee_saves = !targets_secure &&
959                     (lr & R_V7M_EXCRET_S_MASK);
960 
961                 for (i = 0; i < 13; i++) {
962                     if (i < 4 || i > 11 || zero_callee_saves) {
963                         env->regs[i] = 0;
964                     }
965                 }
966                 /* Clear EAPSR */
967                 xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
968             }
969         }
970     }
971 
972     if (push_failed && !ignore_stackfaults) {
973         /*
974          * Derived exception on callee-saves register stacking:
975          * we might now want to take a different exception which
976          * targets a different security state, so try again from the top.
977          */
978         qemu_log_mask(CPU_LOG_INT,
979                       "...derived exception on callee-saves register stacking\n");
980         v7m_exception_taken(cpu, lr, true, true);
981         return;
982     }
983 
984     if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
985         /* Vector load failed: derived exception */
986         qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load\n");
987         v7m_exception_taken(cpu, lr, true, true);
988         return;
989     }
990 
991     /*
992      * Now we've done everything that might cause a derived exception
993      * we can go ahead and activate whichever exception we're going to
994      * take (which might now be the derived exception).
995      */
996     armv7m_nvic_acknowledge_irq(env->nvic);
997 
998     /* Switch to target security state -- must do this before writing SPSEL */
999     switch_v7m_security_state(env, targets_secure);
1000     write_v7m_control_spsel(env, 0);
1001     arm_clear_exclusive(env);
1002     /* Clear SFPA and FPCA (has no effect if no FPU) */
1003     env->v7m.control[M_REG_S] &=
1004         ~(R_V7M_CONTROL_FPCA_MASK | R_V7M_CONTROL_SFPA_MASK);
1005     /* Clear IT bits */
1006     env->condexec_bits = 0;
1007     env->regs[14] = lr;
1008     env->regs[15] = addr & 0xfffffffe;
1009     env->thumb = addr & 1;
1010     arm_rebuild_hflags(env);
1011 }
1012 
1013 static void v7m_update_fpccr(CPUARMState *env, uint32_t frameptr,
1014                              bool apply_splim)
1015 {
1016     /*
1017      * Like the pseudocode UpdateFPCCR: save state in FPCAR and FPCCR
1018      * that we will need later in order to do lazy FP reg stacking.
1019      */
1020     bool is_secure = env->v7m.secure;
1021     NVICState *nvic = env->nvic;
1022     /*
1023      * Some bits are unbanked and live always in fpccr[M_REG_S]; some bits
1024      * are banked and we want to update the bit in the bank for the
1025      * current security state; and in one case we want to specifically
1026      * update the NS banked version of a bit even if we are secure.
1027      */
1028     uint32_t *fpccr_s = &env->v7m.fpccr[M_REG_S];
1029     uint32_t *fpccr_ns = &env->v7m.fpccr[M_REG_NS];
1030     uint32_t *fpccr = &env->v7m.fpccr[is_secure];
1031     bool hfrdy, bfrdy, mmrdy, ns_ufrdy, s_ufrdy, sfrdy, monrdy;
1032 
1033     env->v7m.fpcar[is_secure] = frameptr & ~0x7;
1034 
1035     if (apply_splim && arm_feature(env, ARM_FEATURE_V8)) {
1036         bool splimviol;
1037         uint32_t splim = v7m_sp_limit(env);
1038         bool ign = armv7m_nvic_neg_prio_requested(nvic, is_secure) &&
1039             (env->v7m.ccr[is_secure] & R_V7M_CCR_STKOFHFNMIGN_MASK);
1040 
1041         splimviol = !ign && frameptr < splim;
1042         *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, SPLIMVIOL, splimviol);
1043     }
1044 
1045     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, LSPACT, 1);
1046 
1047     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, S, is_secure);
1048 
1049     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, USER, arm_current_el(env) == 0);
1050 
1051     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, THREAD,
1052                         !arm_v7m_is_handler_mode(env));
1053 
1054     hfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_HARD, false);
1055     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
1056 
1057     bfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_BUS, false);
1058     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
1059 
1060     mmrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_MEM, is_secure);
1061     *fpccr = FIELD_DP32(*fpccr, V7M_FPCCR, MMRDY, mmrdy);
1062 
1063     ns_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, false);
1064     *fpccr_ns = FIELD_DP32(*fpccr_ns, V7M_FPCCR, UFRDY, ns_ufrdy);
1065 
1066     monrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_DEBUG, false);
1067     *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, MONRDY, monrdy);
1068 
1069     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1070         s_ufrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_USAGE, true);
1071         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, UFRDY, s_ufrdy);
1072 
1073         sfrdy = armv7m_nvic_get_ready_status(nvic, ARMV7M_EXCP_SECURE, false);
1074         *fpccr_s = FIELD_DP32(*fpccr_s, V7M_FPCCR, SFRDY, sfrdy);
1075     }
1076 }
1077 
1078 void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
1079 {
1080     /* fptr is the value of Rn, the frame pointer we store the FP regs to */
1081     ARMCPU *cpu = env_archcpu(env);
1082     bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1083     bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
1084     uintptr_t ra = GETPC();
1085 
1086     assert(env->v7m.secure);
1087 
1088     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1089         return;
1090     }
1091 
1092     /* Check access to the coprocessor is permitted */
1093     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1094         raise_exception_ra(env, EXCP_NOCP, 0, 1, ra);
1095     }
1096 
1097     if (lspact) {
1098         /* LSPACT should not be active when there is active FP state */
1099         raise_exception_ra(env, EXCP_LSERR, 0, 1, ra);
1100     }
1101 
1102     if (fptr & 7) {
1103         raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, ra);
1104     }
1105 
1106     /*
1107      * Note that we do not use v7m_stack_write() here, because the
1108      * accesses should not set the FSR bits for stacking errors if they
1109      * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
1110      * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
1111      * and longjmp out.
1112      */
1113     if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1114         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1115         int i;
1116 
1117         for (i = 0; i < (ts ? 32 : 16); i += 2) {
1118             uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1119             uint32_t faddr = fptr + 4 * i;
1120             uint32_t slo = extract64(dn, 0, 32);
1121             uint32_t shi = extract64(dn, 32, 32);
1122 
1123             if (i >= 16) {
1124                 faddr += 8; /* skip the slot for the FPSCR */
1125             }
1126             cpu_stl_data_ra(env, faddr, slo, ra);
1127             cpu_stl_data_ra(env, faddr + 4, shi, ra);
1128         }
1129         cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
1130         if (cpu_isar_feature(aa32_mve, cpu)) {
1131             cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
1132         }
1133 
1134         /*
1135          * If TS is 0 then s0 to s15, FPSCR and VPR are UNKNOWN; we choose to
1136          * leave them unchanged, matching our choice in v7m_preserve_fp_state.
1137          */
1138         if (ts) {
1139             for (i = 0; i < 32; i += 2) {
1140                 *aa32_vfp_dreg(env, i / 2) = 0;
1141             }
1142             vfp_set_fpscr(env, 0);
1143             if (cpu_isar_feature(aa32_mve, cpu)) {
1144                 env->v7m.vpr = 0;
1145             }
1146         }
1147     } else {
1148         v7m_update_fpccr(env, fptr, false);
1149     }
1150 
1151     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
1152 }
1153 
1154 void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
1155 {
1156     ARMCPU *cpu = env_archcpu(env);
1157     uintptr_t ra = GETPC();
1158 
1159     /* fptr is the value of Rn, the frame pointer we load the FP regs from */
1160     assert(env->v7m.secure);
1161 
1162     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1163         return;
1164     }
1165 
1166     /* Check access to the coprocessor is permitted */
1167     if (!v7m_cpacr_pass(env, true, arm_current_el(env) != 0)) {
1168         raise_exception_ra(env, EXCP_NOCP, 0, 1, ra);
1169     }
1170 
1171     if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1172         /* State in FP is still valid */
1173         env->v7m.fpccr[M_REG_S] &= ~R_V7M_FPCCR_LSPACT_MASK;
1174     } else {
1175         bool ts = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK;
1176         int i;
1177         uint32_t fpscr;
1178 
1179         if (fptr & 7) {
1180             raise_exception_ra(env, EXCP_UNALIGNED, 0, 1, ra);
1181         }
1182 
1183         for (i = 0; i < (ts ? 32 : 16); i += 2) {
1184             uint32_t slo, shi;
1185             uint64_t dn;
1186             uint32_t faddr = fptr + 4 * i;
1187 
1188             if (i >= 16) {
1189                 faddr += 8; /* skip the slot for the FPSCR and VPR */
1190             }
1191 
1192             slo = cpu_ldl_data_ra(env, faddr, ra);
1193             shi = cpu_ldl_data_ra(env, faddr + 4, ra);
1194 
1195             dn = (uint64_t) shi << 32 | slo;
1196             *aa32_vfp_dreg(env, i / 2) = dn;
1197         }
1198         fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
1199         vfp_set_fpscr(env, fpscr);
1200         if (cpu_isar_feature(aa32_mve, cpu)) {
1201             env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
1202         }
1203     }
1204 
1205     env->v7m.control[M_REG_S] |= R_V7M_CONTROL_FPCA_MASK;
1206 }
1207 
1208 static bool v7m_push_stack(ARMCPU *cpu)
1209 {
1210     /*
1211      * Do the "set up stack frame" part of exception entry,
1212      * similar to pseudocode PushStack().
1213      * Return true if we generate a derived exception (and so
1214      * should ignore further stack faults trying to process
1215      * that derived exception.)
1216      */
1217     bool stacked_ok = true, limitviol = false;
1218     CPUARMState *env = &cpu->env;
1219     uint32_t xpsr = xpsr_read(env);
1220     uint32_t frameptr = env->regs[13];
1221     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
1222     uint32_t framesize;
1223     bool nsacr_cp10 = extract32(env->v7m.nsacr, 10, 1);
1224 
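    /*
     * Frame sizes: 0x20 is the basic 8-word frame; 0x68 adds the s0-s15,
     * FPSCR and VPR/reserved slots (0x48 bytes); 0xa8 further adds
     * s16-s31 (0x40 bytes) when the Secure FPCCR.TS bit is set.
     */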
1225     if ((env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) &&
1226         (env->v7m.secure || nsacr_cp10)) {
1227         if (env->v7m.secure &&
1228             env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK) {
1229             framesize = 0xa8;
1230         } else {
1231             framesize = 0x68;
1232         }
1233     } else {
1234         framesize = 0x20;
1235     }
1236 
1237     /* Align stack pointer if the guest wants that */
1238     if ((frameptr & 4) &&
1239         (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
1240         frameptr -= 4;
1241         xpsr |= XPSR_SPREALIGN;
1242     }
1243 
1244     xpsr &= ~XPSR_SFPA;
1245     if (env->v7m.secure &&
1246         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)) {
1247         xpsr |= XPSR_SFPA;
1248     }
1249 
1250     frameptr -= framesize;
1251 
1252     if (arm_feature(env, ARM_FEATURE_V8)) {
1253         uint32_t limit = v7m_sp_limit(env);
1254 
1255         if (frameptr < limit) {
1256             /*
1257              * Stack limit failure: set SP to the limit value, and generate
1258              * STKOF UsageFault. Stack pushes below the limit must not be
1259              * performed. It is IMPDEF whether pushes above the limit are
1260              * performed; we choose not to.
1261              */
1262             qemu_log_mask(CPU_LOG_INT,
1263                           "...STKOF during stacking\n");
1264             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
1265             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1266                                     env->v7m.secure);
1267             env->regs[13] = limit;
1268             /*
1269              * We won't try to perform any further memory accesses but
1270              * we must continue through the following code to check for
1271              * permission faults during FPU state preservation, and we
1272              * must update FPCCR if lazy stacking is enabled.
1273              */
1274             limitviol = true;
1275             stacked_ok = false;
1276         }
1277     }
1278 
1279     /*
1280      * Write as much of the stack frame as we can. If we fail a stack
1281      * write this will result in a derived exception being pended
1282      * (which may be taken in preference to the one we started with
1283      * if it has higher priority).
1284      */
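    /*
     * Basic frame layout: [0x00..0x0c] r0-r3, [0x10] r12, [0x14] lr,
     * [0x18] return address, [0x1c] xPSR; any FP extension follows at
     * frameptr + 0x20.
     */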
1285     stacked_ok = stacked_ok &&
1286         v7m_stack_write(cpu, frameptr, env->regs[0], mmu_idx, STACK_NORMAL) &&
1287         v7m_stack_write(cpu, frameptr + 4, env->regs[1],
1288                         mmu_idx, STACK_NORMAL) &&
1289         v7m_stack_write(cpu, frameptr + 8, env->regs[2],
1290                         mmu_idx, STACK_NORMAL) &&
1291         v7m_stack_write(cpu, frameptr + 12, env->regs[3],
1292                         mmu_idx, STACK_NORMAL) &&
1293         v7m_stack_write(cpu, frameptr + 16, env->regs[12],
1294                         mmu_idx, STACK_NORMAL) &&
1295         v7m_stack_write(cpu, frameptr + 20, env->regs[14],
1296                         mmu_idx, STACK_NORMAL) &&
1297         v7m_stack_write(cpu, frameptr + 24, env->regs[15],
1298                         mmu_idx, STACK_NORMAL) &&
1299         v7m_stack_write(cpu, frameptr + 28, xpsr, mmu_idx, STACK_NORMAL);
1300 
1301     if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) {
1302         /* FPU is active, try to save its registers */
1303         bool fpccr_s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
1304         bool lspact = env->v7m.fpccr[fpccr_s] & R_V7M_FPCCR_LSPACT_MASK;
1305 
1306         if (lspact && arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1307             qemu_log_mask(CPU_LOG_INT,
1308                           "...SecureFault because LSPACT and FPCA both set\n");
1309             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1310             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1311         } else if (!env->v7m.secure && !nsacr_cp10) {
1312             qemu_log_mask(CPU_LOG_INT,
1313                           "...Secure UsageFault with CFSR.NOCP because "
1314                           "NSACR.CP10 prevents stacking FP regs\n");
1315             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, M_REG_S);
1316             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1317         } else {
1318             if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
1319                 /* Lazy stacking disabled, save registers now */
1320                 int i;
1321                 bool cpacr_pass = v7m_cpacr_pass(env, env->v7m.secure,
1322                                                  arm_current_el(env) != 0);
1323 
1324                 if (stacked_ok && !cpacr_pass) {
1325                     /*
1326                      * Take UsageFault if CPACR forbids access. The pseudocode
1327                      * here does a full CheckCPEnabled() but we know the NSACR
1328                      * check can never fail as we have already handled that.
1329                      */
1330                     qemu_log_mask(CPU_LOG_INT,
1331                                   "...UsageFault with CFSR.NOCP because "
1332                                   "CPACR.CP10 prevents stacking FP regs\n");
1333                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1334                                             env->v7m.secure);
1335                     env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
1336                     stacked_ok = false;
1337                 }
1338 
1339                 for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1340                     uint64_t dn = *aa32_vfp_dreg(env, i / 2);
1341                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1342                     uint32_t slo = extract64(dn, 0, 32);
1343                     uint32_t shi = extract64(dn, 32, 32);
1344 
1345                     if (i >= 16) {
1346                         faddr += 8; /* skip the slot for the FPSCR and VPR */
1347                     }
1348                     stacked_ok = stacked_ok &&
1349                         v7m_stack_write(cpu, faddr, slo,
1350                                         mmu_idx, STACK_NORMAL) &&
1351                         v7m_stack_write(cpu, faddr + 4, shi,
1352                                         mmu_idx, STACK_NORMAL);
1353                 }
1354                 stacked_ok = stacked_ok &&
1355                     v7m_stack_write(cpu, frameptr + 0x60,
1356                                     vfp_get_fpscr(env), mmu_idx, STACK_NORMAL);
1357                 if (cpu_isar_feature(aa32_mve, cpu)) {
1358                     stacked_ok = stacked_ok &&
1359                         v7m_stack_write(cpu, frameptr + 0x64,
1360                                         env->v7m.vpr, mmu_idx, STACK_NORMAL);
1361                 }
1362                 if (cpacr_pass) {
1363                     for (i = 0; i < ((framesize == 0xa8) ? 32 : 16); i += 2) {
1364                         *aa32_vfp_dreg(env, i / 2) = 0;
1365                     }
1366                     vfp_set_fpscr(env, 0);
1367                     if (cpu_isar_feature(aa32_mve, cpu)) {
1368                         env->v7m.vpr = 0;
1369                     }
1370                 }
1371             } else {
1372                 /* Lazy stacking enabled, save necessary info to stack later */
1373                 v7m_update_fpccr(env, frameptr + 0x20, true);
1374             }
1375         }
1376     }
1377 
1378     /*
1379      * If we broke a stack limit then SP was already updated earlier;
1380      * otherwise we update SP regardless of whether any of the stack
1381      * accesses failed or we took some other kind of fault.
1382      */
1383     if (!limitviol) {
1384         env->regs[13] = frameptr;
1385     }
1386 
1387     return !stacked_ok;
1388 }
1389 
1390 static void do_v7m_exception_exit(ARMCPU *cpu)
1391 {
1392     CPUARMState *env = &cpu->env;
1393     uint32_t excret;
1394     uint32_t xpsr, xpsr_mask;
1395     bool ufault = false;
1396     bool sfault = false;
1397     bool return_to_sp_process;
1398     bool return_to_handler;
1399     bool rettobase = false;
1400     bool exc_secure = false;
1401     bool return_to_secure;
1402     bool ftype;
1403     bool restore_s16_s31 = false;
1404 
1405     /*
1406      * If we're not in Handler mode then jumps to magic exception-exit
1407      * addresses don't have magic behaviour. However for the v8M
1408      * security extensions the magic secure-function-return has to
1409      * work in thread mode too, so to avoid doing an extra check in
1410      * the generated code we allow exception-exit magic to also cause the
1411      * internal exception and bring us here in thread mode. Correct code
1412      * will never try to do this (the following insn fetch will always
1413      * fault) so the overhead of having taken an unnecessary exception
1414      * doesn't matter.
1415      */
1416     if (!arm_v7m_is_handler_mode(env)) {
1417         return;
1418     }
1419 
1420     /*
1421      * In the spec pseudocode ExceptionReturn() is called directly
1422      * from BXWritePC() and gets the full target PC value including
1423      * bit zero. In QEMU's implementation we treat it as a normal
1424      * jump-to-register (which is then caught later on), and so split
1425      * the target value up between env->regs[15] and env->thumb in
1426      * gen_bx(). Reconstitute it.
1427      */
1428     excret = env->regs[15];
1429     if (env->thumb) {
1430         excret |= 1;
1431     }
1432 
1433     qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
1434                   " previous exception %d\n",
1435                   excret, env->v7m.exception);
1436 
1437     if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
1438         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
1439                       "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
1440                       excret);
1441     }
1442 
1443     ftype = excret & R_V7M_EXCRET_FTYPE_MASK;
1444 
1445     if (!ftype && !cpu_isar_feature(aa32_vfp_simd, cpu)) {
1446         qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero FTYPE in exception "
1447                       "exit PC value 0x%" PRIx32 " is UNPREDICTABLE "
1448                       "if FPU not present\n",
1449                       excret);
1450         ftype = true;
1451     }
1452 
1453     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1454         /*
1455          * EXC_RETURN.ES validation check (R_SMFL). We must do this before
1456          * we pick which FAULTMASK to clear.
1457          */
1458         if (!env->v7m.secure &&
1459             ((excret & R_V7M_EXCRET_ES_MASK) ||
1460              !(excret & R_V7M_EXCRET_DCRS_MASK))) {
1461             sfault = true;
1462             /* For all other purposes, treat ES as 0 (R_HXSR) */
1463             excret &= ~R_V7M_EXCRET_ES_MASK;
1464         }
1465         exc_secure = excret & R_V7M_EXCRET_ES_MASK;
1466     }
1467 
1468     if (env->v7m.exception != ARMV7M_EXCP_NMI) {
1469         /*
1470          * Auto-clear FAULTMASK on return from other than NMI.
1471          * If the security extension is implemented then this only
1472          * happens if the raw execution priority is >= 0; the
1473          * value of the ES bit in the exception return value indicates
1474          * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
1475          */
1476         if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1477             if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
1478                 env->v7m.faultmask[exc_secure] = 0;
1479             }
1480         } else {
1481             env->v7m.faultmask[M_REG_NS] = 0;
1482         }
1483     }
1484 
1485     switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
1486                                      exc_secure)) {
1487     case -1:
1488         /* attempt to exit an exception that isn't active */
1489         ufault = true;
1490         break;
1491     case 0:
1492         /* still an irq active now */
1493         break;
1494     case 1:
1495         /*
1496          * We returned to base exception level, no nesting.
1497          * (In the pseudocode this is written using "NestedActivation != 1"
1498          * where we have 'rettobase == false'.)
1499          */
1500         rettobase = true;
1501         break;
1502     default:
1503         g_assert_not_reached();
1504     }
1505 
1506     return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
1507     return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
1508     return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
1509         (excret & R_V7M_EXCRET_S_MASK);
1510 
1511     if (arm_feature(env, ARM_FEATURE_V8)) {
1512         if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
1513             /*
1514              * UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
1515              * we choose to take the UsageFault.
1516              */
1517             if ((excret & R_V7M_EXCRET_S_MASK) ||
1518                 (excret & R_V7M_EXCRET_ES_MASK) ||
1519                 !(excret & R_V7M_EXCRET_DCRS_MASK)) {
1520                 ufault = true;
1521             }
1522         }
1523         if (excret & R_V7M_EXCRET_RES0_MASK) {
1524             ufault = true;
1525         }
1526     } else {
1527         /* For v7M we only recognize certain combinations of the low bits */
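             /*
              * That is, the architected v7M EXC_RETURN values: 0xfffffff1
              * (return to Handler), 0xfffffff9 (Thread, main stack) and
              * 0xfffffffd (Thread, process stack), or the same values with
              * bit 4 (FTYPE) clear when an FP frame was stacked
              * (0xffffffe1/0xffffffe9/0xffffffed).
              */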
1528         switch (excret & 0xf) {
1529         case 1: /* Return to Handler */
1530             break;
1531         case 13: /* Return to Thread using Process stack */
1532         case 9: /* Return to Thread using Main stack */
1533             /*
1534              * We only need to check NONBASETHRDENA for v7M, because in
1535              * v8M this bit does not exist (it is RES1).
1536              */
1537             if (!rettobase &&
1538                 !(env->v7m.ccr[env->v7m.secure] &
1539                   R_V7M_CCR_NONBASETHRDENA_MASK)) {
1540                 ufault = true;
1541             }
1542             break;
1543         default:
1544             ufault = true;
1545         }
1546     }
1547 
1548     /*
1549      * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
1550      * Handler mode (and will be until we write the new XPSR.Interrupt
1551      * field) this does not switch around the current stack pointer.
1552      * We must do this before we do any kind of tailchaining, including
1553      * for the derived exceptions on integrity check failures, or we will
1554      * give the guest an incorrect EXCRET.SPSEL value on exception entry.
1555      */
1556     write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
1557 
1558     /*
1559      * Clear scratch FP values left in caller-saved registers; this
1560      * must happen before any kind of tail chaining.
1561      */
1562     if ((env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_CLRONRET_MASK) &&
1563         (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
1564         if (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK) {
1565             env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1566             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1567             qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1568                           "stackframe: error during lazy state deactivation\n");
1569             v7m_exception_taken(cpu, excret, true, false);
1570             return;
1571         } else {
1572             if (arm_feature(env, ARM_FEATURE_V8_1M)) {
1573                 /* v8.1M adds this NOCP check */
1574                 bool nsacr_pass = exc_secure ||
1575                     extract32(env->v7m.nsacr, 10, 1);
1576                 bool cpacr_pass = v7m_cpacr_pass(env, exc_secure, true);
1577                 if (!nsacr_pass) {
1578                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1579                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_NOCP_MASK;
1580                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1581                         "stackframe: NSACR prevents clearing FPU registers\n");
1582                     v7m_exception_taken(cpu, excret, true, false);
1583                     return;
1584                 } else if (!cpacr_pass) {
1585                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1586                                             exc_secure);
1587                     env->v7m.cfsr[exc_secure] |= R_V7M_CFSR_NOCP_MASK;
1588                     qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1589                         "stackframe: CPACR prevents clearing FPU registers\n");
1590                     v7m_exception_taken(cpu, excret, true, false);
1591                     return;
1592                 }
1593             }
1594             /* Clear s0..s15, FPSCR and VPR */
1595             int i;
1596 
1597             for (i = 0; i < 16; i += 2) {
1598                 *aa32_vfp_dreg(env, i / 2) = 0;
1599             }
1600             vfp_set_fpscr(env, 0);
1601             if (cpu_isar_feature(aa32_mve, cpu)) {
1602                 env->v7m.vpr = 0;
1603             }
1604         }
1605     }
1606 
1607     if (sfault) {
1608         env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
1609         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1610         qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1611                       "stackframe: failed EXC_RETURN.ES validity check\n");
1612         v7m_exception_taken(cpu, excret, true, false);
1613         return;
1614     }
1615 
1616     if (ufault) {
1617         /*
1618          * Bad exception return: instead of popping the exception
1619          * stack, directly take a usage fault on the current stack.
1620          */
1621         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1622         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
1623         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1624                       "stackframe: failed exception return integrity check\n");
1625         v7m_exception_taken(cpu, excret, true, false);
1626         return;
1627     }
1628 
1629     /*
1630      * Tailchaining: if there is currently a pending exception that
1631      * is high enough priority to preempt execution at the level we're
1632      * about to return to, then just directly take that exception now,
1633      * avoiding an unstack-and-then-stack. Note that now we have
1634      * deactivated the previous exception by calling armv7m_nvic_complete_irq(),
1635      * our current execution priority is already the execution priority we are
1636      * returning to -- none of the state we would unstack or set based on
1637      * the EXCRET value affects it.
1638      */
1639     if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
1640         qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
1641         v7m_exception_taken(cpu, excret, true, false);
1642         return;
1643     }
1644 
1645     switch_v7m_security_state(env, return_to_secure);
1646 
1647     {
1648         /*
1649          * The stack pointer we should be reading the exception frame from
1650          * depends on bits in the magic exception return type value (and
1651          * for v8M isn't necessarily the stack pointer we will eventually
1652          * end up resuming execution with). Get a pointer to the location
1653          * in the CPU state struct where the SP we need is currently being
1654          * stored; we will use and modify it in place.
1655          * We use this limited C variable scope so we don't accidentally
1656          * use 'frame_sp_p' after we do something that makes it invalid.
1657          */
1658         bool spsel = env->v7m.control[return_to_secure] & R_V7M_CONTROL_SPSEL_MASK;
1659         uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
1660                                               return_to_secure,
1661                                               !return_to_handler,
1662                                               spsel);
1663         uint32_t frameptr = *frame_sp_p;
1664         bool pop_ok = true;
1665         ARMMMUIdx mmu_idx;
1666         bool return_to_priv = return_to_handler ||
1667             !(env->v7m.control[return_to_secure] & R_V7M_CONTROL_NPRIV_MASK);
1668 
1669         mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, return_to_secure,
1670                                                         return_to_priv);
1671 
1672         if (!QEMU_IS_ALIGNED(frameptr, 8) &&
1673             arm_feature(env, ARM_FEATURE_V8)) {
1674             qemu_log_mask(LOG_GUEST_ERROR,
1675                           "M profile exception return with non-8-aligned SP "
1676                           "for destination state is UNPREDICTABLE\n");
1677         }
1678 
1679         /* Do we need to pop callee-saved registers? */
1680         if (return_to_secure &&
1681             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
1682              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
1683             uint32_t actual_sig;
1684 
1685             pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
1686 
1687             if (pop_ok && v7m_integrity_sig(env, excret) != actual_sig) {
1688                 /* Take a SecureFault on the current stack */
1689                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
1690                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1691                 qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
1692                               "stackframe: failed exception return integrity "
1693                               "signature check\n");
1694                 v7m_exception_taken(cpu, excret, true, false);
1695                 return;
1696             }
1697 
1698             pop_ok = pop_ok &&
1699                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
1700                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
1701                 v7m_stack_read(cpu, &env->regs[6], frameptr + 0x10, mmu_idx) &&
1702                 v7m_stack_read(cpu, &env->regs[7], frameptr + 0x14, mmu_idx) &&
1703                 v7m_stack_read(cpu, &env->regs[8], frameptr + 0x18, mmu_idx) &&
1704                 v7m_stack_read(cpu, &env->regs[9], frameptr + 0x1c, mmu_idx) &&
1705                 v7m_stack_read(cpu, &env->regs[10], frameptr + 0x20, mmu_idx) &&
1706                 v7m_stack_read(cpu, &env->regs[11], frameptr + 0x24, mmu_idx);
1707 
1708             frameptr += 0x28;
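                 /*
                  * The callee-saves section just consumed is laid out as:
                  * the integrity signature at frameptr + 0x0, a reserved
                  * word at + 0x4, then r4..r11 at + 0x8 .. + 0x24, for
                  * 0x28 bytes in all.
                  */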
1709         }
1710 
1711         /* Pop registers */
1712         pop_ok = pop_ok &&
1713             v7m_stack_read(cpu, &env->regs[0], frameptr, mmu_idx) &&
1714             v7m_stack_read(cpu, &env->regs[1], frameptr + 0x4, mmu_idx) &&
1715             v7m_stack_read(cpu, &env->regs[2], frameptr + 0x8, mmu_idx) &&
1716             v7m_stack_read(cpu, &env->regs[3], frameptr + 0xc, mmu_idx) &&
1717             v7m_stack_read(cpu, &env->regs[12], frameptr + 0x10, mmu_idx) &&
1718             v7m_stack_read(cpu, &env->regs[14], frameptr + 0x14, mmu_idx) &&
1719             v7m_stack_read(cpu, &env->regs[15], frameptr + 0x18, mmu_idx) &&
1720             v7m_stack_read(cpu, &xpsr, frameptr + 0x1c, mmu_idx);
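             /*
              * i.e. the hardware-stacked basic frame: r0..r3 at frameptr
              * + 0x0 .. + 0xc, then r12, lr, the return PC and xPSR at
              * + 0x10 .. + 0x1c.
              */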
1721 
1722         if (!pop_ok) {
1723             /*
1724              * v7m_stack_read() pended a fault, so take it (as a tail
1725              * chained exception on the same stack frame)
1726              */
1727             qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
1728             v7m_exception_taken(cpu, excret, true, false);
1729             return;
1730         }
1731 
1732         /*
1733          * Returning from an exception with a PC with bit 0 set is defined
1734          * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
1735          * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
1736          * the lsbit, and there are several RTOSes out there which incorrectly
1737          * assume the r15 in the stack frame should be a Thumb-style "lsbit
1738          * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
1739          * complain about the badly behaved guest.
1740          */
1741         if (env->regs[15] & 1) {
1742             env->regs[15] &= ~1U;
1743             if (!arm_feature(env, ARM_FEATURE_V8)) {
1744                 qemu_log_mask(LOG_GUEST_ERROR,
1745                               "M profile return from interrupt with misaligned "
1746                               "PC is UNPREDICTABLE on v7M\n");
1747             }
1748         }
1749 
1750         if (arm_feature(env, ARM_FEATURE_V8)) {
1751             /*
1752              * For v8M we have to check whether the xPSR exception field
1753              * matches the EXCRET value for return to handler/thread
1754              * before we commit to changing the SP and xPSR.
1755              */
1756             bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
1757             if (return_to_handler != will_be_handler) {
1758                 /*
1759                  * Take an INVPC UsageFault on the current stack.
1760                  * By this point we will have switched to the security state
1761                  * for the background state, so this UsageFault will target
1762                  * that state.
1763                  */
1764                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1765                                         env->v7m.secure);
1766                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1767                 qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
1768                               "stackframe: failed exception return integrity "
1769                               "check\n");
1770                 v7m_exception_taken(cpu, excret, true, false);
1771                 return;
1772             }
1773         }
1774 
1775         if (!ftype) {
1776             /* FP present and we need to handle it */
1777             if (!return_to_secure &&
1778                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPACT_MASK)) {
1779                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
1780                 env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
1781                 qemu_log_mask(CPU_LOG_INT,
1782                               "...taking SecureFault on existing stackframe: "
1783                               "Secure LSPACT set but exception return is "
1784                               "not to secure state\n");
1785                 v7m_exception_taken(cpu, excret, true, false);
1786                 return;
1787             }
1788 
1789             restore_s16_s31 = return_to_secure &&
1790                 (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK);
1791 
1792             if (env->v7m.fpccr[return_to_secure] & R_V7M_FPCCR_LSPACT_MASK) {
1793                 /* State in FPU is still valid, just clear LSPACT */
1794                 env->v7m.fpccr[return_to_secure] &= ~R_V7M_FPCCR_LSPACT_MASK;
1795             } else {
1796                 int i;
1797                 uint32_t fpscr;
1798                 bool cpacr_pass, nsacr_pass;
1799 
1800                 cpacr_pass = v7m_cpacr_pass(env, return_to_secure,
1801                                             return_to_priv);
1802                 nsacr_pass = return_to_secure ||
1803                     extract32(env->v7m.nsacr, 10, 1);
1804 
1805                 if (!cpacr_pass) {
1806                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1807                                             return_to_secure);
1808                     env->v7m.cfsr[return_to_secure] |= R_V7M_CFSR_NOCP_MASK;
1809                     qemu_log_mask(CPU_LOG_INT,
1810                                   "...taking UsageFault on existing "
1811                                   "stackframe: CPACR.CP10 prevents unstacking "
1812                                   "FP regs\n");
1813                     v7m_exception_taken(cpu, excret, true, false);
1814                     return;
1815                 } else if (!nsacr_pass) {
1816                     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, true);
1817                     env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_INVPC_MASK;
1818                     qemu_log_mask(CPU_LOG_INT,
1819                                   "...taking Secure UsageFault on existing "
1820                                   "stackframe: NSACR.CP10 prevents unstacking "
1821                                   "FP regs\n");
1822                     v7m_exception_taken(cpu, excret, true, false);
1823                     return;
1824                 }
1825 
1826                 for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1827                     uint32_t slo, shi;
1828                     uint64_t dn;
1829                     uint32_t faddr = frameptr + 0x20 + 4 * i;
1830 
1831                     if (i >= 16) {
1832                         faddr += 8; /* Skip the slot for the FPSCR and VPR */
1832                         faddr += 8; /* Skip the slots for FPSCR and VPR */
1834 
1835                     pop_ok = pop_ok &&
1836                         v7m_stack_read(cpu, &slo, faddr, mmu_idx) &&
1837                         v7m_stack_read(cpu, &shi, faddr + 4, mmu_idx);
1838 
1839                     if (!pop_ok) {
1840                         break;
1841                     }
1842 
1843                     dn = (uint64_t)shi << 32 | slo;
1844                     *aa32_vfp_dreg(env, i / 2) = dn;
1845                 }
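                     /*
                      * So s0/s1 come from frameptr + 0x20 and s14/s15 end
                      * at + 0x5c; for the extended frame s16/s17 resume at
                      * + 0x68, skipping the FPSCR slot at + 0x60 and the
                      * VPR slot at + 0x64.
                      */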
1846                 pop_ok = pop_ok &&
1847                     v7m_stack_read(cpu, &fpscr, frameptr + 0x60, mmu_idx);
1848                 if (pop_ok) {
1849                     vfp_set_fpscr(env, fpscr);
1850                 }
1851                 if (cpu_isar_feature(aa32_mve, cpu)) {
1852                     pop_ok = pop_ok &&
1853                         v7m_stack_read(cpu, &env->v7m.vpr,
1854                                        frameptr + 0x64, mmu_idx);
1855                 }
1856                 if (!pop_ok) {
1857                     /*
1858                      * These regs are 0 if the security extension is present;
1859                      * otherwise they are merely UNKNOWN. We always zero them.
1860                      */
1861                     for (i = 0; i < (restore_s16_s31 ? 32 : 16); i += 2) {
1862                         *aa32_vfp_dreg(env, i / 2) = 0;
1863                     }
1864                     vfp_set_fpscr(env, 0);
1865                     if (cpu_isar_feature(aa32_mve, cpu)) {
1866                         env->v7m.vpr = 0;
1867                     }
1868                 }
1869             }
1870         }
1871         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1872                                                V7M_CONTROL, FPCA, !ftype);
1873 
1874         /* Commit to consuming the stack frame */
1875         frameptr += 0x20;
1876         if (!ftype) {
1877             frameptr += 0x48;
1878             if (restore_s16_s31) {
1879                 frameptr += 0x40;
1880             }
1881         }
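             /*
              * That is, 0x20 for the basic frame, 0x68 for a standard FP
              * frame (adding s0..s15, FPSCR and the VPR/reserved word),
              * and 0xa8 for the extended frame which also covers s16..s31.
              */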
1882         /*
1883      * Undo stack alignment: the SPREALIGN bit indicates that the original
1884          * pre-exception SP was not 8-aligned and we added a padding word to
1885          * align it, so we undo this by ORing in the bit that increases it
1886          * from the current 8-aligned value to the 8-unaligned value. (Adding 4
1887          * would work too but a logical OR is how the pseudocode specifies it.)
1888          */
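             /*
              * Worked example (illustrative addresses): if the
              * pre-exception SP was 0x2000100c, entry stacked a 0x20-byte
              * frame at 0x20000fe8 and set SPREALIGN; here
              * 0x20000fe8 + 0x20 == 0x20001008, and ORing in bit 2
              * restores the original 0x2000100c.
              */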
1889         if (xpsr & XPSR_SPREALIGN) {
1890             frameptr |= 4;
1891         }
1892         *frame_sp_p = frameptr;
1893     }
1894 
1895     xpsr_mask = ~(XPSR_SPREALIGN | XPSR_SFPA);
1896     if (!arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
1897         xpsr_mask &= ~XPSR_GE;
1898     }
1899     /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
1900     xpsr_write(env, xpsr, xpsr_mask);
1901 
1902     if (env->v7m.secure) {
1903         bool sfpa = xpsr & XPSR_SFPA;
1904 
1905         env->v7m.control[M_REG_S] = FIELD_DP32(env->v7m.control[M_REG_S],
1906                                                V7M_CONTROL, SFPA, sfpa);
1907     }
1908 
1909     /*
1910      * The restored xPSR exception field will be zero if we're
1911      * resuming in Thread mode. If that doesn't match what the
1912      * exception return excret specified then this is a UsageFault.
1913      * v7M requires we make this check here; v8M did it earlier.
1914      */
1915     if (return_to_handler != arm_v7m_is_handler_mode(env)) {
1916         /*
1917          * Take an INVPC UsageFault by pushing the stack again;
1918          * we know we're v7M so this is never a Secure UsageFault.
1919          */
1920         bool ignore_stackfaults;
1921 
1922         assert(!arm_feature(env, ARM_FEATURE_V8));
1923         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
1924         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1925         ignore_stackfaults = v7m_push_stack(cpu);
1926         qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
1927                       "failed exception return integrity check\n");
1928         v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
1929         return;
1930     }
1931 
1932     /* Otherwise, we have a successful exception exit. */
1933     arm_clear_exclusive(env);
1934     arm_rebuild_hflags(env);
1935     qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
1936 }
1937 
1938 static bool do_v7m_function_return(ARMCPU *cpu)
1939 {
1940     /*
1941      * v8M security extensions magic function return.
1942      * We may either:
1943      *  (1) throw an exception (longjump)
1944      *  (2) return true if we successfully handled the function return
1945      *  (3) return false if we failed a consistency check and have
1946      *      pended a UsageFault that needs to be taken now
1947      *
1948      * At this point the magic return value is split between env->regs[15]
1949      * and env->thumb. We don't bother to reconstitute it because we don't
1950      * need it (all values are handled the same way).
1951      */
1952     CPUARMState *env = &cpu->env;
1953     uint32_t newpc, newpsr, newpsr_exc;
1954 
1955     qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
1956 
1957     {
1958         bool threadmode, spsel;
1959         MemOpIdx oi;
1960         ARMMMUIdx mmu_idx;
1961         uint32_t *frame_sp_p;
1962         uint32_t frameptr;
1963 
1964         /* Pull the return address and IPSR from the Secure stack */
1965         threadmode = !arm_v7m_is_handler_mode(env);
1966         spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
1967 
1968         frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
1969         frameptr = *frame_sp_p;
1970 
1971         /*
1972          * These loads may throw an exception (for MPU faults). We want to
1973          * do them as secure, so work out what MMU index that is.
1974          */
1975         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
1976         oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
1977         newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0);
1978         newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0);
1979 
1980         /* Consistency checks on new IPSR */
1981         newpsr_exc = newpsr & XPSR_EXCP;
1982         if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
1983               (env->v7m.exception == 1 && newpsr_exc != 0))) {
1984             /* Pend the fault and tell our caller to take it */
1985             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
1986             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
1987                                     env->v7m.secure);
1988             qemu_log_mask(CPU_LOG_INT,
1989                           "...taking INVPC UsageFault: "
1990                           "IPSR consistency check failed\n");
1991             return false;
1992         }
1993 
1994         *frame_sp_p = frameptr + 8;
1995     }
1996 
1997     /* This invalidates frame_sp_p */
1998     switch_v7m_security_state(env, true);
1999     env->v7m.exception = newpsr_exc;
2000     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2001     if (newpsr & XPSR_SFPA) {
2002         env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
2003     }
2004     xpsr_write(env, 0, XPSR_IT);
2005     env->thumb = newpc & 1;
2006     env->regs[15] = newpc & ~1;
2007     arm_rebuild_hflags(env);
2008 
2009     qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
2010     return true;
2011 }
2012 
2013 static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx, bool secure,
2014                                uint32_t addr, uint16_t *insn)
2015 {
2016     /*
2017      * Load a 16-bit portion of a v7M instruction, returning true on success,
2018      * or false on failure (in which case we will have pended the appropriate
2019      * exception).
2020      * We need to do the instruction fetch's MPU and SAU checks
2021      * like this because there is no MMU index that would allow
2022      * doing the load with a single function call. Instead we must
2023      * first check that the security attributes permit the load
2024      * and that they don't mismatch on the two halves of the instruction,
2025      * and then we do the load as a secure load (ie using the security
2026      * attributes of the address, not the CPU, as architecturally required).
2027      */
2028     CPUState *cs = CPU(cpu);
2029     CPUARMState *env = &cpu->env;
2030     V8M_SAttributes sattrs = {};
2031     GetPhysAddrResult res = {};
2032     ARMMMUFaultInfo fi = {};
2033     MemTxResult txres;
2034 
2035     v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, secure, &sattrs);
2036     if (!sattrs.nsc || sattrs.ns) {
2037         /*
2038          * This must be the second half of the insn, and it straddles a
2039          * region boundary with the second half not being S&NSC.
2040          */
2041         env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2042         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2043         qemu_log_mask(CPU_LOG_INT,
2044                       "...really SecureFault with SFSR.INVEP\n");
2045         return false;
2046     }
2047     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx, &res, &fi)) {
2048         /* the MPU lookup failed */
2049         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2050         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
2051         qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
2052         return false;
2053     }
2054     *insn = address_space_lduw_le(arm_addressspace(cs, res.f.attrs),
2055                                   res.f.phys_addr, res.f.attrs, &txres);
2056     if (txres != MEMTX_OK) {
2057         env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2058         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2059         qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
2060         return false;
2061     }
2062     return true;
2063 }
2064 
2065 static bool v7m_read_sg_stack_word(ARMCPU *cpu, ARMMMUIdx mmu_idx,
2066                                    uint32_t addr, uint32_t *spdata)
2067 {
2068     /*
2069      * Read a word of data from the stack for the SG instruction,
2070      * writing the value into *spdata. If the load succeeds, return
2071      * true; otherwise pend an appropriate exception and return false.
2072      * (We can't use data load helpers here that throw an exception
2073      * because of the context we're called in, which is halfway through
2074      * arm_v7m_cpu_do_interrupt().)
2075      */
2076     CPUState *cs = CPU(cpu);
2077     CPUARMState *env = &cpu->env;
2078     MemTxResult txres;
2079     GetPhysAddrResult res = {};
2080     ARMMMUFaultInfo fi = {};
2081     uint32_t value;
2082 
2083     if (get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi)) {
2084         /* MPU/SAU lookup failed */
2085         if (fi.type == ARMFault_QEMU_SFault) {
2086             qemu_log_mask(CPU_LOG_INT,
2087                           "...SecureFault during stack word read\n");
2088             env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK | R_V7M_SFSR_SFARVALID_MASK;
2089             env->v7m.sfar = addr;
2090             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2091         } else {
2092             qemu_log_mask(CPU_LOG_INT,
2093                           "...MemManageFault during stack word read\n");
2094             env->v7m.cfsr[M_REG_S] |= R_V7M_CFSR_DACCVIOL_MASK |
2095                 R_V7M_CFSR_MMARVALID_MASK;
2096             env->v7m.mmfar[M_REG_S] = addr;
2097             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, false);
2098         }
2099         return false;
2100     }
2101     value = address_space_ldl(arm_addressspace(cs, res.f.attrs),
2102                               res.f.phys_addr, res.f.attrs, &txres);
2103     if (txres != MEMTX_OK) {
2104         /* BusFault trying to read the data */
2105         qemu_log_mask(CPU_LOG_INT,
2106                       "...BusFault during stack word read\n");
2107         env->v7m.cfsr[M_REG_NS] |=
2108             (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2109         env->v7m.bfar = addr;
2110         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2111         return false;
2112     }
2113 
2114     *spdata = value;
2115     return true;
2116 }
2117 
2118 static bool v7m_handle_execute_nsc(ARMCPU *cpu)
2119 {
2120     /*
2121      * Check whether this attempt to execute code in a Secure & NS-Callable
2122      * memory region is for an SG instruction; if so, then emulate the
2123      * effect of the SG instruction and return true. Otherwise pend
2124      * the correct kind of exception and return false.
2125      */
2126     CPUARMState *env = &cpu->env;
2127     ARMMMUIdx mmu_idx;
2128     uint16_t insn;
2129 
2130     /*
2131      * We should never get here unless get_phys_addr_pmsav8() caused
2132      * an exception for NS executing in S&NSC memory.
2133      */
2134     assert(!env->v7m.secure);
2135     assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2136 
2137     /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
2138     mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
2139 
2140     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15], &insn)) {
2141         return false;
2142     }
2143 
2144     if (!env->thumb) {
2145         goto gen_invep;
2146     }
2147 
2148     if (insn != 0xe97f) {
2149         /*
2150          * Not an SG instruction first half (we choose the IMPDEF
2151          * early-SG-check option).
2152          */
2153         goto gen_invep;
2154     }
2155 
2156     if (!v7m_read_half_insn(cpu, mmu_idx, true, env->regs[15] + 2, &insn)) {
2157         return false;
2158     }
2159 
2160     if (insn != 0xe97f) {
2161         /*
2162          * Not an SG instruction second half (yes, both halves of the SG
2163          * insn have the same hex value)
2164          */
2165         goto gen_invep;
2166     }
2167 
2168     /*
2169      * OK, we have confirmed that we really have an SG instruction.
2170      * We know we're NS in S memory so don't need to repeat those checks.
2171      */
2172     qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
2173                   ", executing it\n", env->regs[15]);
2174 
2175     if (cpu_isar_feature(aa32_m_sec_state, cpu) &&
2176         !arm_v7m_is_handler_mode(env)) {
2177         /*
2178          * v8.1M exception stack frame integrity check. Note that we
2179          * must perform the memory access even if CCR_S.TRD is zero
2180          * and we aren't going to check what the data loaded is.
2181          */
2182         uint32_t spdata, sp;
2183 
2184         /*
2185          * We know we are currently NS, so the S stack pointers must be
2186          * in other_ss_{psp,msp}, not in regs[13]/other_sp.
2187          */
2188         sp = v7m_using_psp(env) ? env->v7m.other_ss_psp : env->v7m.other_ss_msp;
2189         if (!v7m_read_sg_stack_word(cpu, mmu_idx, sp, &spdata)) {
2190             /* Stack access failed and an exception has been pended */
2191             return false;
2192         }
2193 
2194         if (env->v7m.ccr[M_REG_S] & R_V7M_CCR_TRD_MASK) {
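                 /*
                  * 0xfefa125a is the exception-frame integrity signature
                  * (see v7m_integrity_sig()); bit 0 of the signature
                  * varies with FTYPE, hence the ~1 in the comparison.
                  */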
2195             if (((spdata & ~1) == 0xfefa125a) ||
2196                 !(env->v7m.control[M_REG_S] & 1)) {
2197                 goto gen_invep;
2198             }
2199         }
2200     }
2201 
2202     env->regs[14] &= ~1;
2203     env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2204     switch_v7m_security_state(env, true);
2205     xpsr_write(env, 0, XPSR_IT);
2206     env->regs[15] += 4;
2207     arm_rebuild_hflags(env);
2208     return true;
2209 
2210 gen_invep:
2211     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2212     armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2213     qemu_log_mask(CPU_LOG_INT,
2214                   "...really SecureFault with SFSR.INVEP\n");
2215     return false;
2216 }
2217 
2218 void arm_v7m_cpu_do_interrupt(CPUState *cs)
2219 {
2220     ARMCPU *cpu = ARM_CPU(cs);
2221     CPUARMState *env = &cpu->env;
2222     uint32_t lr;
2223     bool ignore_stackfaults;
2224 
2225     arm_log_exception(cs);
2226 
2227     /*
2228      * For exceptions we just mark as pending on the NVIC, and let that
2229      * handle it.
2230      */
2231     switch (cs->exception_index) {
2232     case EXCP_UDEF:
2233         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2234         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
2235         break;
2236     case EXCP_NOCP:
2237     {
2238         /*
2239          * NOCP might be directed to something other than the current
2240          * security state if this fault is because of NSACR; we indicate
2241          * the target security state using exception.target_el.
2242          */
2243         int target_secstate;
2244 
2245         if (env->exception.target_el == 3) {
2246             target_secstate = M_REG_S;
2247         } else {
2248             target_secstate = env->v7m.secure;
2249         }
2250         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, target_secstate);
2251         env->v7m.cfsr[target_secstate] |= R_V7M_CFSR_NOCP_MASK;
2252         break;
2253     }
2254     case EXCP_INVSTATE:
2255         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2256         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
2257         break;
2258     case EXCP_STKOF:
2259         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2260         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_STKOF_MASK;
2261         break;
2262     case EXCP_LSERR:
2263         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2264         env->v7m.sfsr |= R_V7M_SFSR_LSERR_MASK;
2265         break;
2266     case EXCP_UNALIGNED:
2267         /* Unaligned faults reported by M-profile aware code */
2268         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2269         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2270         break;
2271     case EXCP_DIVBYZERO:
2272         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
2273         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_DIVBYZERO_MASK;
2274         break;
2275     case EXCP_SWI:
2276         /* The PC already points to the next instruction.  */
2277         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
2278         break;
2279     case EXCP_PREFETCH_ABORT:
2280     case EXCP_DATA_ABORT:
2281         /*
2282          * Note that for M profile we don't have a guest facing FSR, but
2283          * the env->exception.fsr will be populated by the code that
2284          * raises the fault, in the A profile short-descriptor format.
2285          *
2286          * Log the exception.vaddress now regardless of subtype, because
2287          * logging below only logs it when it goes into a guest visible
2288          * register.
2289          */
2290         qemu_log_mask(CPU_LOG_INT, "...at fault address 0x%x\n",
2291                       (uint32_t)env->exception.vaddress);
2292         switch (env->exception.fsr & 0xf) {
2293         case M_FAKE_FSR_NSC_EXEC:
2294             /*
2295              * Exception generated when we try to execute code at an address
2296              * which is marked as Secure & Non-Secure Callable and the CPU
2297              * is in the Non-Secure state. The only instruction which can
2298              * be executed like this is SG (and that only if both halves of
2299              * the SG instruction have the same security attributes.)
2300              * Everything else must generate an INVEP SecureFault, so we
2301              * emulate the SG instruction here.
2302              */
2303             if (v7m_handle_execute_nsc(cpu)) {
2304                 return;
2305             }
2306             break;
2307         case M_FAKE_FSR_SFAULT:
2308             /*
2309              * Various flavours of SecureFault for attempts to execute or
2310              * access data in the wrong security state.
2311              */
2312             switch (cs->exception_index) {
2313             case EXCP_PREFETCH_ABORT:
2314                 if (env->v7m.secure) {
2315                     env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
2316                     qemu_log_mask(CPU_LOG_INT,
2317                                   "...really SecureFault with SFSR.INVTRAN\n");
2318                 } else {
2319                     env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
2320                     qemu_log_mask(CPU_LOG_INT,
2321                                   "...really SecureFault with SFSR.INVEP\n");
2322                 }
2323                 break;
2324             case EXCP_DATA_ABORT:
2325                 /* This must be an NS access to S memory */
2326                 env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
2327                 qemu_log_mask(CPU_LOG_INT,
2328                               "...really SecureFault with SFSR.AUVIOL\n");
2329                 break;
2330             }
2331             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
2332             break;
2333         case 0x8: /* External Abort */
2334             switch (cs->exception_index) {
2335             case EXCP_PREFETCH_ABORT:
2336                 env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
2337                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
2338                 break;
2339             case EXCP_DATA_ABORT:
2340                 env->v7m.cfsr[M_REG_NS] |=
2341                     (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
2342                 env->v7m.bfar = env->exception.vaddress;
2343                 qemu_log_mask(CPU_LOG_INT,
2344                               "...with CFSR.PRECISERR and BFAR 0x%x\n",
2345                               env->v7m.bfar);
2346                 break;
2347             }
2348             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
2349             break;
2350         case 0x1: /* Alignment fault reported by generic code */
2351             qemu_log_mask(CPU_LOG_INT,
2352                           "...really UsageFault with UFSR.UNALIGNED\n");
2353             env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNALIGNED_MASK;
2354             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
2355                                     env->v7m.secure);
2356             break;
2357         default:
2358             /*
2359              * All other FSR values are either MPU faults or "can't happen
2360              * for M profile" cases.
2361              */
2362             switch (cs->exception_index) {
2363             case EXCP_PREFETCH_ABORT:
2364                 env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
2365                 qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
2366                 break;
2367             case EXCP_DATA_ABORT:
2368                 env->v7m.cfsr[env->v7m.secure] |=
2369                     (R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
2370                 env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
2371                 qemu_log_mask(CPU_LOG_INT,
2372                               "...with CFSR.DACCVIOL and MMFAR 0x%x\n",
2373                               env->v7m.mmfar[env->v7m.secure]);
2374                 break;
2375             }
2376             armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
2377                                     env->v7m.secure);
2378             break;
2379         }
2380         break;
2381     case EXCP_SEMIHOST:
2382         qemu_log_mask(CPU_LOG_INT,
2383                       "...handling as semihosting call 0x%x\n",
2384                       env->regs[0]);
2385 #ifdef CONFIG_TCG
2386         do_common_semihosting(cs);
2387 #else
2388         g_assert_not_reached();
2389 #endif
2390         env->regs[15] += env->thumb ? 2 : 4;
2391         return;
2392     case EXCP_BKPT:
2393         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
2394         break;
2395     case EXCP_IRQ:
2396         break;
2397     case EXCP_EXCEPTION_EXIT:
2398         if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
2399             /* Must be v8M security extension function return */
2400             assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
2401             assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
2402             if (do_v7m_function_return(cpu)) {
2403                 return;
2404             }
2405         } else {
2406             do_v7m_exception_exit(cpu);
2407             return;
2408         }
2409         break;
2410     case EXCP_LAZYFP:
2411         /*
2412          * We already pended the specific exception in the NVIC in the
2413          * v7m_preserve_fp_state() helper function.
2414          */
2415         break;
2416     default:
2417         cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
2418         return; /* Never happens.  Keep compiler happy.  */
2419     }
2420 
2421     if (arm_feature(env, ARM_FEATURE_V8)) {
2422         lr = R_V7M_EXCRET_RES1_MASK |
2423             R_V7M_EXCRET_DCRS_MASK;
2424         /*
2425          * The S bit indicates whether we should return to Secure
2426          * or NonSecure (ie our current state).
2427          * The ES bit indicates whether we're taking this exception
2428          * to Secure or NonSecure (ie our target state). We set it
2429          * later, in v7m_exception_taken().
2430          * The SPSEL bit is also set in v7m_exception_taken() for v8M.
2431          * This corresponds to the ARM ARM pseudocode for v8M setting
2432          * some LR bits in PushStack() and some in ExceptionTaken();
2433          * the distinction matters for the tailchain cases where we
2434          * can take an exception without pushing the stack.
2435          */
2436         if (env->v7m.secure) {
2437             lr |= R_V7M_EXCRET_S_MASK;
2438         }
2439     } else {
2440         lr = R_V7M_EXCRET_RES1_MASK |
2441             R_V7M_EXCRET_S_MASK |
2442             R_V7M_EXCRET_DCRS_MASK |
2443             R_V7M_EXCRET_ES_MASK;
2444         if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
2445             lr |= R_V7M_EXCRET_SPSEL_MASK;
2446         }
2447     }
2448     if (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK)) {
2449         lr |= R_V7M_EXCRET_FTYPE_MASK;
2450     }
2451     if (!arm_v7m_is_handler_mode(env)) {
2452         lr |= R_V7M_EXCRET_MODE_MASK;
2453     }
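         /*
          * Worked example: on v7M, an exception taken from Thread mode
          * using the process stack with no active FP context gives
          * lr = RES1 | S | DCRS | ES | SPSEL | FTYPE | MODE = 0xfffffffd.
          */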
2454 
2455     ignore_stackfaults = v7m_push_stack(cpu);
2456     v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
2457 }
2458 
2459 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
2460 {
2461     unsigned el = arm_current_el(env);
2462 
2463     /* First handle registers which unprivileged code can read */
2464     switch (reg) {
2465     case 0 ... 7: /* xPSR sub-fields */
2466         return v7m_mrs_xpsr(env, reg, el);
2467     case 20: /* CONTROL */
2468         return v7m_mrs_control(env, env->v7m.secure);
2469     case 0x94: /* CONTROL_NS */
2470         /*
2471          * We have to handle this here because unprivileged Secure code
2472          * can read the NS CONTROL register.
2473          */
2474         if (!env->v7m.secure) {
2475             return 0;
2476         }
2477         return env->v7m.control[M_REG_NS] |
2478             (env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK);
2479     }
2480 
2481     if (el == 0) {
2482         return 0; /* unprivileged reads others as zero */
2483     }
2484 
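         /*
          * SYSm values with bit 7 set (0x88..0x98) are aliases of the
          * NS banked registers; they are accessible only from Secure
          * state and read as zero from NS, as each case enforces.
          */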
2485     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2486         switch (reg) {
2487         case 0x88: /* MSP_NS */
2488             if (!env->v7m.secure) {
2489                 return 0;
2490             }
2491             return env->v7m.other_ss_msp;
2492         case 0x89: /* PSP_NS */
2493             if (!env->v7m.secure) {
2494                 return 0;
2495             }
2496             return env->v7m.other_ss_psp;
2497         case 0x8a: /* MSPLIM_NS */
2498             if (!env->v7m.secure) {
2499                 return 0;
2500             }
2501             return env->v7m.msplim[M_REG_NS];
2502         case 0x8b: /* PSPLIM_NS */
2503             if (!env->v7m.secure) {
2504                 return 0;
2505             }
2506             return env->v7m.psplim[M_REG_NS];
2507         case 0x90: /* PRIMASK_NS */
2508             if (!env->v7m.secure) {
2509                 return 0;
2510             }
2511             return env->v7m.primask[M_REG_NS];
2512         case 0x91: /* BASEPRI_NS */
2513             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2514                 goto bad_reg;
2515             }
2516             if (!env->v7m.secure) {
2517                 return 0;
2518             }
2519             return env->v7m.basepri[M_REG_NS];
2520         case 0x93: /* FAULTMASK_NS */
2521             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2522                 goto bad_reg;
2523             }
2524             if (!env->v7m.secure) {
2525                 return 0;
2526             }
2527             return env->v7m.faultmask[M_REG_NS];
2528         case 0x98: /* SP_NS */
2529         {
2530             /*
2531              * This gives the non-secure SP selected based on whether we're
2532              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2533              */
2534             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2535 
2536             if (!env->v7m.secure) {
2537                 return 0;
2538             }
2539             if (!arm_v7m_is_handler_mode(env) && spsel) {
2540                 return env->v7m.other_ss_psp;
2541             } else {
2542                 return env->v7m.other_ss_msp;
2543             }
2544         }
2545         default:
2546             break;
2547         }
2548     }
2549 
2550     switch (reg) {
2551     case 8: /* MSP */
2552         return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
2553     case 9: /* PSP */
2554         return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
2555     case 10: /* MSPLIM */
2556         if (!arm_feature(env, ARM_FEATURE_V8)) {
2557             goto bad_reg;
2558         }
2559         return env->v7m.msplim[env->v7m.secure];
2560     case 11: /* PSPLIM */
2561         if (!arm_feature(env, ARM_FEATURE_V8)) {
2562             goto bad_reg;
2563         }
2564         return env->v7m.psplim[env->v7m.secure];
2565     case 16: /* PRIMASK */
2566         return env->v7m.primask[env->v7m.secure];
2567     case 17: /* BASEPRI */
2568     case 18: /* BASEPRI_MAX */
2569         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2570             goto bad_reg;
2571         }
2572         return env->v7m.basepri[env->v7m.secure];
2573     case 19: /* FAULTMASK */
2574         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2575             goto bad_reg;
2576         }
2577         return env->v7m.faultmask[env->v7m.secure];
2578     default:
2579     bad_reg:
2580         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
2581                                        " register %d\n", reg);
2582         return 0;
2583     }
2584 }
2585 
2586 void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
2587 {
2588     /*
2589      * We're passed bits [11..0] of the instruction; extract
2590      * SYSm and the mask bits.
2591      * Invalid combinations of SYSm and mask are UNPREDICTABLE;
2592      * we choose to treat them as if the mask bits were valid.
2593      * NB that the pseudocode 'mask' variable is bits [11..10],
2594      * whereas ours is [11..8].
2595      */
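         /*
          * For example, "MSR APSR_nzcvq, r0" has insn bit 11 set and
          * SYSm == 0, so here mask == 8 and reg == 0; v7m_msr_xpsr()
          * then maps mask bit 3 to the NZCV and Q bits.
          */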
2596     uint32_t mask = extract32(maskreg, 8, 4);
2597     uint32_t reg = extract32(maskreg, 0, 8);
2598     int cur_el = arm_current_el(env);
2599 
2600     if (cur_el == 0 && reg > 7 && reg != 20) {
2601         /*
2602          * only xPSR sub-fields and CONTROL.SFPA may be written by
2603          * unprivileged code
2604          */
2605         return;
2606     }
2607 
2608     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
2609         switch (reg) {
2610         case 0x88: /* MSP_NS */
2611             if (!env->v7m.secure) {
2612                 return;
2613             }
2614             env->v7m.other_ss_msp = val & ~3;
2615             return;
2616         case 0x89: /* PSP_NS */
2617             if (!env->v7m.secure) {
2618                 return;
2619             }
2620             env->v7m.other_ss_psp = val & ~3;
2621             return;
2622         case 0x8a: /* MSPLIM_NS */
2623             if (!env->v7m.secure) {
2624                 return;
2625             }
2626             env->v7m.msplim[M_REG_NS] = val & ~7;
2627             return;
2628         case 0x8b: /* PSPLIM_NS */
2629             if (!env->v7m.secure) {
2630                 return;
2631             }
2632             env->v7m.psplim[M_REG_NS] = val & ~7;
2633             return;
2634         case 0x90: /* PRIMASK_NS */
2635             if (!env->v7m.secure) {
2636                 return;
2637             }
2638             env->v7m.primask[M_REG_NS] = val & 1;
2639             return;
2640         case 0x91: /* BASEPRI_NS */
2641             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2642                 goto bad_reg;
2643             }
2644             if (!env->v7m.secure) {
2645                 return;
2646             }
2647             env->v7m.basepri[M_REG_NS] = val & 0xff;
2648             return;
2649         case 0x93: /* FAULTMASK_NS */
2650             if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2651                 goto bad_reg;
2652             }
2653             if (!env->v7m.secure) {
2654                 return;
2655             }
2656             env->v7m.faultmask[M_REG_NS] = val & 1;
2657             return;
2658         case 0x94: /* CONTROL_NS */
2659             if (!env->v7m.secure) {
2660                 return;
2661             }
2662             write_v7m_control_spsel_for_secstate(env,
2663                                                  val & R_V7M_CONTROL_SPSEL_MASK,
2664                                                  M_REG_NS);
2665             if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
2666                 env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
2667                 env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
2668             }
2669             /*
2670              * SFPA is RAZ/WI from NS. FPCA is RO if NSACR.CP10 == 0,
2671              * RES0 if the FPU is not present, and is stored in the S bank
2672              */
2673             if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env)) &&
2674                 extract32(env->v7m.nsacr, 10, 1)) {
2675                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2676                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2677             }
2678             return;
2679         case 0x98: /* SP_NS */
2680         {
2681             /*
2682              * This gives the non-secure SP selected based on whether we're
2683              * currently in handler mode or not, using the NS CONTROL.SPSEL.
2684              */
2685             bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
2686             bool is_psp = !arm_v7m_is_handler_mode(env) && spsel;
2687             uint32_t limit;
2688 
2689             if (!env->v7m.secure) {
2690                 return;
2691             }
2692 
2693             limit = is_psp ? env->v7m.psplim[M_REG_NS] : env->v7m.msplim[M_REG_NS];
2694 
2695             val &= ~0x3;
2696 
2697             if (val < limit) {
2698                 raise_exception_ra(env, EXCP_STKOF, 0, 1, GETPC());
2699             }
2700 
2701             if (is_psp) {
2702                 env->v7m.other_ss_psp = val;
2703             } else {
2704                 env->v7m.other_ss_msp = val;
2705             }
2706             return;
2707         }
2708         default:
2709             break;
2710         }
2711     }
2712 
2713     switch (reg) {
2714     case 0 ... 7: /* xPSR sub-fields */
2715         v7m_msr_xpsr(env, mask, reg, val);
2716         break;
2717     case 8: /* MSP */
2718         if (v7m_using_psp(env)) {
2719             env->v7m.other_sp = val & ~3;
2720         } else {
2721             env->regs[13] = val & ~3;
2722         }
2723         break;
2724     case 9: /* PSP */
2725         if (v7m_using_psp(env)) {
2726             env->regs[13] = val & ~3;
2727         } else {
2728             env->v7m.other_sp = val & ~3;
2729         }
2730         break;
2731     case 10: /* MSPLIM */
2732         if (!arm_feature(env, ARM_FEATURE_V8)) {
2733             goto bad_reg;
2734         }
2735         env->v7m.msplim[env->v7m.secure] = val & ~7;
2736         break;
2737     case 11: /* PSPLIM */
2738         if (!arm_feature(env, ARM_FEATURE_V8)) {
2739             goto bad_reg;
2740         }
2741         env->v7m.psplim[env->v7m.secure] = val & ~7;
2742         break;
2743     case 16: /* PRIMASK */
2744         env->v7m.primask[env->v7m.secure] = val & 1;
2745         break;
2746     case 17: /* BASEPRI */
2747         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2748             goto bad_reg;
2749         }
2750         env->v7m.basepri[env->v7m.secure] = val & 0xff;
2751         break;
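    /*
     * BASEPRI_MAX is a conditional write: it only updates BASEPRI when
     * the new value raises priority, i.e. when it is nonzero and either
     * numerically lower than the current value or BASEPRI is currently
     * 0 (disabled). E.g. with BASEPRI 0x40, writing 0x20 takes effect
     * but writing 0x80 is ignored.
     */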
2752     case 18: /* BASEPRI_MAX */
2753         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2754             goto bad_reg;
2755         }
2756         val &= 0xff;
2757         if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
2758                          || env->v7m.basepri[env->v7m.secure] == 0)) {
2759             env->v7m.basepri[env->v7m.secure] = val;
2760         }
2761         break;
2762     case 19: /* FAULTMASK */
2763         if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
2764             goto bad_reg;
2765         }
2766         env->v7m.faultmask[env->v7m.secure] = val & 1;
2767         break;
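    /*
     * Illustrative guest sequence (not QEMU code): privileged
     * thread-mode software switching to the process stack executes
     *     MOVS r0, #2       ; set CONTROL.SPSEL
     *     MSR  CONTROL, r0
     *     ISB
     * and arrives here with reg == 20.
     */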
2768     case 20: /* CONTROL */
2769         /*
2770          * Writing to the SPSEL bit only has an effect if we are in
2771          * thread mode; the other bits can be updated by any privileged code.
2772          * write_v7m_control_spsel() deals with updating the SPSEL bit in
2773          * env->v7m.control, so we only need to update the others.
2774          * For v7M we must simply ignore explicit writes to SPSEL in handler
2775          * mode; for v8M the write is permitted but has no effect.
2776          * All of these bits are write-ignored from non-privileged code,
2777          * except for SFPA.
2778          */
2779         if (cur_el > 0 && (arm_feature(env, ARM_FEATURE_V8) ||
2780                            !arm_v7m_is_handler_mode(env))) {
2781             write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
2782         }
2783         if (cur_el > 0 && arm_feature(env, ARM_FEATURE_M_MAIN)) {
2784             env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
2785             env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
2786         }
2787         if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
2788             /*
2789              * SFPA is RAZ/WI from NS or if no FPU.
2790              * FPCA is RO if NSACR.CP10 == 0, RES0 if the FPU is not present.
2791              * Both are stored in the S bank.
2792              */
2793             if (env->v7m.secure) {
2794                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
2795                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_SFPA_MASK;
2796             }
2797             if (cur_el > 0 &&
2798                 (env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_SECURITY) ||
2799                  extract32(env->v7m.nsacr, 10, 1))) {
2800                 env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_FPCA_MASK;
2801                 env->v7m.control[M_REG_S] |= val & R_V7M_CONTROL_FPCA_MASK;
2802             }
2803         }
2804         break;
2805     default:
2806     bad_reg:
2807         qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
2808                                        " register %d\n", reg);
2809         return;
2810     }
2811 }
2812 
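/*
 * Illustrative guest usage (not QEMU code): the four TT variants encode
 * op as follows, with bit 0 forcing an unprivileged query and bit 1
 * selecting the other security state:
 *     TT   r0, r1    ; op = 0b00
 *     TTT  r0, r1    ; op = 0b01 (unprivileged)
 *     TTA  r0, r1    ; op = 0b10 (alternate security state)
 *     TTAT r0, r1    ; op = 0b11 (alternate, unprivileged)
 */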
2813 uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
2814 {
2815     /* Implement the TT instruction. op is bits [7:6] of the insn. */
2816     bool forceunpriv = op & 1;
2817     bool alt = op & 2;
2818     V8M_SAttributes sattrs = {};
2819     uint32_t tt_resp;
2820     bool r, rw, nsr, nsrw, mrvalid;
2821     ARMMMUIdx mmu_idx;
2822     uint32_t mregion;
2823     bool targetpriv;
2824     bool targetsec = env->v7m.secure;
2825 
2826     /*
2827      * Work out which security state and privilege level we're
2828      * interested in...
2829      */
2830     if (alt) {
2831         targetsec = !targetsec;
2832     }
2833 
2834     if (forceunpriv) {
2835         targetpriv = false;
2836     } else {
2837         targetpriv = arm_v7m_is_handler_mode(env) ||
2838             !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
2839     }
2840 
2841     /* ...and then figure out which MMU index this is */
2842     mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
2843 
2844     /*
2845      * For our purposes the MPU and SAU don't care about the access
2846      * type, except that we must not claim to be an insn fetch; so we
2847      * arbitrarily call this a read.
2848      */
2849 
2850     /*
2851      * MPU region info is only available to privileged code, or when
2852      * inspecting the other security state's MPU.
2853      */
2854     if (arm_current_el(env) != 0 || alt) {
2855         GetPhysAddrResult res = {};
2856         ARMMMUFaultInfo fi = {};
2857 
2858         /* We can ignore the return value as prot is always set */
2859         pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec,
2860                           &res, &fi, &mregion);
2861         if (mregion == -1) {
2862             mrvalid = false;
2863             mregion = 0;
2864         } else {
2865             mrvalid = true;
2866         }
2867         r = res.f.prot & PAGE_READ;
2868         rw = res.f.prot & PAGE_WRITE;
2869     } else {
2870         r = false;
2871         rw = false;
2872         mrvalid = false;
2873         mregion = 0;
2874     }
2875 
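    /*
     * Security attribution results are only visible to Secure callers;
     * for Non-secure callers the S bit reads as zero and the NSR/NSRW
     * and SAU region fields are all zero.
     */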
2876     if (env->v7m.secure) {
2877         v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
2878                             targetsec, &sattrs);
2879         nsr = sattrs.ns && r;
2880         nsrw = sattrs.ns && rw;
2881     } else {
2882         sattrs.ns = true;
2883         nsr = false;
2884         nsrw = false;
2885     }
2886 
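    /*
     * Assemble the TT response word:
     *   [31:24] IREGION  [23] IRVALID  [22] S    [21] NSRW  [20] NSR
     *   [19]    RW       [18] R        [17] SRVALID
     *   [16]    MRVALID  [15:8] SREGION  [7:0] MREGION
     */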
2887     tt_resp = (sattrs.iregion << 24) |
2888         (sattrs.irvalid << 23) |
2889         ((!sattrs.ns) << 22) |
2890         (nsrw << 21) |
2891         (nsr << 20) |
2892         (rw << 19) |
2893         (r << 18) |
2894         (sattrs.srvalid << 17) |
2895         (mrvalid << 16) |
2896         (sattrs.sregion << 8) |
2897         mregion;
2898 
2899     return tt_resp;
2900 }
2901 
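/*
 * Illustrative only (assumed caller, not part of this file): the response
 * word can be unpacked with the extract32() helpers already used above:
 *
 *     uint32_t resp = helper_v7m_tt(env, addr, 0);
 *     bool readable    = extract32(resp, 18, 1);  // R
 *     bool writable    = extract32(resp, 19, 1);  // RW
 *     bool mrvalid     = extract32(resp, 16, 1);
 *     uint32_t mregion = extract32(resp, 0, 8);   // hit MPU region
 */
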
2902 #endif /* !CONFIG_USER_ONLY */
2903